From df2404b6a55737aa85e469386b676bcea16e995e Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Tue, 20 Nov 2018 11:29:17 +0100 Subject: [PATCH 1/3] VmaReplay: Added support for Windows end of lines. --- src/VmaReplay/Common.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/VmaReplay/Common.cpp b/src/VmaReplay/Common.cpp index c324245..515c1ce 100644 --- a/src/VmaReplay/Common.cpp +++ b/src/VmaReplay/Common.cpp @@ -12,6 +12,11 @@ bool LineSplit::GetNextLine(StrRange& out) while(currLineEnd < m_NumBytes && m_Data[currLineEnd] != '\n') ++currLineEnd; out.end = m_Data + currLineEnd; + // Ignore trailing '\r' to support Windows end of line. + if(out.end > out.beg && *(out.end - 1) == '\r') + { + --out.end; + } m_NextLineBeg = currLineEnd + 1; // Past '\n' ++m_NextLineIndex; return true; From 3b392258e26b5d0b4fa895ebb2434e1872e8d4e6 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Tue, 20 Nov 2018 11:33:07 +0100 Subject: [PATCH 2/3] Documentation: Added mention of VK_AMD_memory_overallocation_behavior extension. --- .../struct_vma_allocator_create_info.html | 2 +- docs/html/vk__mem__alloc_8h_source.html | 176 +++++++++--------- src/vk_mem_alloc.h | 3 +- 3 files changed, 91 insertions(+), 90 deletions(-) diff --git a/docs/html/struct_vma_allocator_create_info.html b/docs/html/struct_vma_allocator_create_info.html index 25fd89f..564e562 100644 --- a/docs/html/struct_vma_allocator_create_info.html +++ b/docs/html/struct_vma_allocator_create_info.html @@ -216,7 +216,7 @@ Public Attributes
  • If user tries to allocate more memory from that heap using this allocator, the allocation fails with VK_ERROR_OUT_OF_DEVICE_MEMORY.
  • If the limit is smaller than heap size reported in VkMemoryHeap::size, the value of this limit will be reported instead when using vmaGetMemoryProperties().
  • -

    Warning! Using this feature may not be equivalent to installing a GPU with smaller amount of memory, because graphics driver doesn't necessarily fail new allocations with VK_ERROR_OUT_OF_DEVICE_MEMORY result when memory capacity is exceeded. It may return success and just silently migrate some device memory blocks to system RAM.

    +

    Warning! Using this feature may not be equivalent to installing a GPU with smaller amount of memory, because graphics driver doesn't necessarily fail new allocations with VK_ERROR_OUT_OF_DEVICE_MEMORY result when memory capacity is exceeded. It may return success and just silently migrate some device memory blocks to system RAM. This driver behavior can also be controlled using VK_AMD_memory_overallocation_behavior extension.

    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 96303b9..bd37171 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,189 +65,189 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1479 /*
    1480 Define this macro to 0/1 to disable/enable support for recording functionality,
    1481 available through VmaAllocatorCreateInfo::pRecordSettings.
    1482 */
    1483 #ifndef VMA_RECORDING_ENABLED
    1484  #ifdef _WIN32
    1485  #define VMA_RECORDING_ENABLED 1
    1486  #else
    1487  #define VMA_RECORDING_ENABLED 0
    1488  #endif
    1489 #endif
    1490 
    1491 #ifndef NOMINMAX
    1492  #define NOMINMAX // For windows.h
    1493 #endif
    1494 
    1495 #include <vulkan/vulkan.h>
    1496 
    1497 #if VMA_RECORDING_ENABLED
    1498  #include <windows.h>
    1499 #endif
    1500 
    1501 #if !defined(VMA_DEDICATED_ALLOCATION)
    1502  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1503  #define VMA_DEDICATED_ALLOCATION 1
    1504  #else
    1505  #define VMA_DEDICATED_ALLOCATION 0
    1506  #endif
    1507 #endif
    1508 
    1518 VK_DEFINE_HANDLE(VmaAllocator)
    1519 
    1520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1522  VmaAllocator allocator,
    1523  uint32_t memoryType,
    1524  VkDeviceMemory memory,
    1525  VkDeviceSize size);
    1527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1528  VmaAllocator allocator,
    1529  uint32_t memoryType,
    1530  VkDeviceMemory memory,
    1531  VkDeviceSize size);
    1532 
    1546 
    1576 
    1579 typedef VkFlags VmaAllocatorCreateFlags;
    1580 
    1585 typedef struct VmaVulkanFunctions {
    1586  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1587  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1588  PFN_vkAllocateMemory vkAllocateMemory;
    1589  PFN_vkFreeMemory vkFreeMemory;
    1590  PFN_vkMapMemory vkMapMemory;
    1591  PFN_vkUnmapMemory vkUnmapMemory;
    1592  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1593  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1594  PFN_vkBindBufferMemory vkBindBufferMemory;
    1595  PFN_vkBindImageMemory vkBindImageMemory;
    1596  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1597  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1598  PFN_vkCreateBuffer vkCreateBuffer;
    1599  PFN_vkDestroyBuffer vkDestroyBuffer;
    1600  PFN_vkCreateImage vkCreateImage;
    1601  PFN_vkDestroyImage vkDestroyImage;
    1602 #if VMA_DEDICATED_ALLOCATION
    1603  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1604  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1605 #endif
    1607 
    1609 typedef enum VmaRecordFlagBits {
    1616 
    1619 typedef VkFlags VmaRecordFlags;
    1620 
    1622 typedef struct VmaRecordSettings
    1623 {
    1633  const char* pFilePath;
    1635 
    1638 {
    1642 
    1643  VkPhysicalDevice physicalDevice;
    1645 
    1646  VkDevice device;
    1648 
    1651 
    1652  const VkAllocationCallbacks* pAllocationCallbacks;
    1654 
    1693  const VkDeviceSize* pHeapSizeLimit;
    1714 
    1716 VkResult vmaCreateAllocator(
    1717  const VmaAllocatorCreateInfo* pCreateInfo,
    1718  VmaAllocator* pAllocator);
    1719 
    1721 void vmaDestroyAllocator(
    1722  VmaAllocator allocator);
    1723 
    1729  VmaAllocator allocator,
    1730  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1731 
    1737  VmaAllocator allocator,
    1738  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1739 
    1747  VmaAllocator allocator,
    1748  uint32_t memoryTypeIndex,
    1749  VkMemoryPropertyFlags* pFlags);
    1750 
    1760  VmaAllocator allocator,
    1761  uint32_t frameIndex);
    1762 
    1765 typedef struct VmaStatInfo
    1766 {
    1768  uint32_t blockCount;
    1774  VkDeviceSize usedBytes;
    1776  VkDeviceSize unusedBytes;
    1779 } VmaStatInfo;
    1780 
    1782 typedef struct VmaStats
    1783 {
    1784  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1785  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1787 } VmaStats;
    1788 
    1790 void vmaCalculateStats(
    1791  VmaAllocator allocator,
    1792  VmaStats* pStats);
    1793 
    1794 #define VMA_STATS_STRING_ENABLED 1
    1795 
    1796 #if VMA_STATS_STRING_ENABLED
    1797 
    1799 
    1801 void vmaBuildStatsString(
    1802  VmaAllocator allocator,
    1803  char** ppStatsString,
    1804  VkBool32 detailedMap);
    1805 
    1806 void vmaFreeStatsString(
    1807  VmaAllocator allocator,
    1808  char* pStatsString);
    1809 
    1810 #endif // #if VMA_STATS_STRING_ENABLED
    1811 
    1820 VK_DEFINE_HANDLE(VmaPool)
    1821 
    1822 typedef enum VmaMemoryUsage
    1823 {
    1872 } VmaMemoryUsage;
    1873 
    1888 
    1943 
    1959 
    1969 
    1976 
    1980 
    1982 {
    1995  VkMemoryPropertyFlags requiredFlags;
    2000  VkMemoryPropertyFlags preferredFlags;
    2008  uint32_t memoryTypeBits;
    2021  void* pUserData;
    2023 
    2040 VkResult vmaFindMemoryTypeIndex(
    2041  VmaAllocator allocator,
    2042  uint32_t memoryTypeBits,
    2043  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2044  uint32_t* pMemoryTypeIndex);
    2045 
    2059  VmaAllocator allocator,
    2060  const VkBufferCreateInfo* pBufferCreateInfo,
    2061  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2062  uint32_t* pMemoryTypeIndex);
    2063 
    2077  VmaAllocator allocator,
    2078  const VkImageCreateInfo* pImageCreateInfo,
    2079  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2080  uint32_t* pMemoryTypeIndex);
    2081 
    2102 
    2119 
    2130 
    2136 
    2139 typedef VkFlags VmaPoolCreateFlags;
    2140 
    2143 typedef struct VmaPoolCreateInfo {
    2158  VkDeviceSize blockSize;
    2187 
    2190 typedef struct VmaPoolStats {
    2193  VkDeviceSize size;
    2196  VkDeviceSize unusedSize;
    2209  VkDeviceSize unusedRangeSizeMax;
    2212  size_t blockCount;
    2213 } VmaPoolStats;
    2214 
    2221 VkResult vmaCreatePool(
    2222  VmaAllocator allocator,
    2223  const VmaPoolCreateInfo* pCreateInfo,
    2224  VmaPool* pPool);
    2225 
    2228 void vmaDestroyPool(
    2229  VmaAllocator allocator,
    2230  VmaPool pool);
    2231 
    2238 void vmaGetPoolStats(
    2239  VmaAllocator allocator,
    2240  VmaPool pool,
    2241  VmaPoolStats* pPoolStats);
    2242 
    2250  VmaAllocator allocator,
    2251  VmaPool pool,
    2252  size_t* pLostAllocationCount);
    2253 
    2268 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2269 
    2294 VK_DEFINE_HANDLE(VmaAllocation)
    2295 
    2296 
    2298 typedef struct VmaAllocationInfo {
    2303  uint32_t memoryType;
    2312  VkDeviceMemory deviceMemory;
    2317  VkDeviceSize offset;
    2322  VkDeviceSize size;
    2336  void* pUserData;
    2338 
    2349 VkResult vmaAllocateMemory(
    2350  VmaAllocator allocator,
    2351  const VkMemoryRequirements* pVkMemoryRequirements,
    2352  const VmaAllocationCreateInfo* pCreateInfo,
    2353  VmaAllocation* pAllocation,
    2354  VmaAllocationInfo* pAllocationInfo);
    2355 
    2363  VmaAllocator allocator,
    2364  VkBuffer buffer,
    2365  const VmaAllocationCreateInfo* pCreateInfo,
    2366  VmaAllocation* pAllocation,
    2367  VmaAllocationInfo* pAllocationInfo);
    2368 
    2370 VkResult vmaAllocateMemoryForImage(
    2371  VmaAllocator allocator,
    2372  VkImage image,
    2373  const VmaAllocationCreateInfo* pCreateInfo,
    2374  VmaAllocation* pAllocation,
    2375  VmaAllocationInfo* pAllocationInfo);
    2376 
    2378 void vmaFreeMemory(
    2379  VmaAllocator allocator,
    2380  VmaAllocation allocation);
    2381 
    2402 VkResult vmaResizeAllocation(
    2403  VmaAllocator allocator,
    2404  VmaAllocation allocation,
    2405  VkDeviceSize newSize);
    2406 
    2424  VmaAllocator allocator,
    2425  VmaAllocation allocation,
    2426  VmaAllocationInfo* pAllocationInfo);
    2427 
    2442 VkBool32 vmaTouchAllocation(
    2443  VmaAllocator allocator,
    2444  VmaAllocation allocation);
    2445 
    2460  VmaAllocator allocator,
    2461  VmaAllocation allocation,
    2462  void* pUserData);
    2463 
    2475  VmaAllocator allocator,
    2476  VmaAllocation* pAllocation);
    2477 
    2512 VkResult vmaMapMemory(
    2513  VmaAllocator allocator,
    2514  VmaAllocation allocation,
    2515  void** ppData);
    2516 
    2521 void vmaUnmapMemory(
    2522  VmaAllocator allocator,
    2523  VmaAllocation allocation);
    2524 
    2537 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2538 
    2551 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2552 
    2569 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2570 
    2572 typedef struct VmaDefragmentationInfo {
    2577  VkDeviceSize maxBytesToMove;
    2584 
    2586 typedef struct VmaDefragmentationStats {
    2588  VkDeviceSize bytesMoved;
    2590  VkDeviceSize bytesFreed;
    2596 
    2635 VkResult vmaDefragment(
    2636  VmaAllocator allocator,
    2637  VmaAllocation* pAllocations,
    2638  size_t allocationCount,
    2639  VkBool32* pAllocationsChanged,
    2640  const VmaDefragmentationInfo *pDefragmentationInfo,
    2641  VmaDefragmentationStats* pDefragmentationStats);
    2642 
    2655 VkResult vmaBindBufferMemory(
    2656  VmaAllocator allocator,
    2657  VmaAllocation allocation,
    2658  VkBuffer buffer);
    2659 
    2672 VkResult vmaBindImageMemory(
    2673  VmaAllocator allocator,
    2674  VmaAllocation allocation,
    2675  VkImage image);
    2676 
    2703 VkResult vmaCreateBuffer(
    2704  VmaAllocator allocator,
    2705  const VkBufferCreateInfo* pBufferCreateInfo,
    2706  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2707  VkBuffer* pBuffer,
    2708  VmaAllocation* pAllocation,
    2709  VmaAllocationInfo* pAllocationInfo);
    2710 
    2722 void vmaDestroyBuffer(
    2723  VmaAllocator allocator,
    2724  VkBuffer buffer,
    2725  VmaAllocation allocation);
    2726 
    2728 VkResult vmaCreateImage(
    2729  VmaAllocator allocator,
    2730  const VkImageCreateInfo* pImageCreateInfo,
    2731  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2732  VkImage* pImage,
    2733  VmaAllocation* pAllocation,
    2734  VmaAllocationInfo* pAllocationInfo);
    2735 
    2747 void vmaDestroyImage(
    2748  VmaAllocator allocator,
    2749  VkImage image,
    2750  VmaAllocation allocation);
    2751 
    2752 #ifdef __cplusplus
    2753 }
    2754 #endif
    2755 
    2756 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2757 
    2758 // For Visual Studio IntelliSense.
    2759 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2760 #define VMA_IMPLEMENTATION
    2761 #endif
    2762 
    2763 #ifdef VMA_IMPLEMENTATION
    2764 #undef VMA_IMPLEMENTATION
    2765 
    2766 #include <cstdint>
    2767 #include <cstdlib>
    2768 #include <cstring>
    2769 
    2770 /*******************************************************************************
    2771 CONFIGURATION SECTION
    2772 
    2773 Define some of these macros before each #include of this header or change them
    2774 here if you need other than the default behavior depending on your environment.
    2775 */
    2776 
    2777 /*
    2778 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2779 internally, like:
    2780 
    2781  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2782 
    2783 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    2784 VmaAllocatorCreateInfo::pVulkanFunctions.
    2785 */
    2786 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2787 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2788 #endif
    2789 
    2790 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2791 //#define VMA_USE_STL_CONTAINERS 1
    2792 
    2793 /* Set this macro to 1 to make the library including and using STL containers:
    2794 std::pair, std::vector, std::list, std::unordered_map.
    2795 
    2796 Set it to 0 or undefined to make the library using its own implementation of
    2797 the containers.
    2798 */
    2799 #if VMA_USE_STL_CONTAINERS
    2800  #define VMA_USE_STL_VECTOR 1
    2801  #define VMA_USE_STL_UNORDERED_MAP 1
    2802  #define VMA_USE_STL_LIST 1
    2803 #endif
    2804 
    2805 #if VMA_USE_STL_VECTOR
    2806  #include <vector>
    2807 #endif
    2808 
    2809 #if VMA_USE_STL_UNORDERED_MAP
    2810  #include <unordered_map>
    2811 #endif
    2812 
    2813 #if VMA_USE_STL_LIST
    2814  #include <list>
    2815 #endif
    2816 
    2817 /*
    2818 Following headers are used in this CONFIGURATION section only, so feel free to
    2819 remove them if not needed.
    2820 */
    2821 #include <cassert> // for assert
    2822 #include <algorithm> // for min, max
    2823 #include <mutex> // for std::mutex
    2824 #include <atomic> // for std::atomic
    2825 
    2826 #ifndef VMA_NULL
    2827  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2828  #define VMA_NULL nullptr
    2829 #endif
    2830 
    2831 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    2832 #include <cstdlib>
    2833 void *aligned_alloc(size_t alignment, size_t size)
    2834 {
    2835  // alignment must be >= sizeof(void*)
    2836  if(alignment < sizeof(void*))
    2837  {
    2838  alignment = sizeof(void*);
    2839  }
    2840 
    2841  return memalign(alignment, size);
    2842 }
    2843 #elif defined(__APPLE__) || defined(__ANDROID__)
    2844 #include <cstdlib>
    2845 void *aligned_alloc(size_t alignment, size_t size)
    2846 {
    2847  // alignment must be >= sizeof(void*)
    2848  if(alignment < sizeof(void*))
    2849  {
    2850  alignment = sizeof(void*);
    2851  }
    2852 
    2853  void *pointer;
    2854  if(posix_memalign(&pointer, alignment, size) == 0)
    2855  return pointer;
    2856  return VMA_NULL;
    2857 }
    2858 #endif
    2859 
    2860 // If your compiler is not compatible with C++11 and definition of
    2861 // aligned_alloc() function is missing, uncommenting the following line may help:
    2862 
    2863 //#include <malloc.h>
    2864 
    2865 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2866 #ifndef VMA_ASSERT
    2867  #ifdef _DEBUG
    2868  #define VMA_ASSERT(expr) assert(expr)
    2869  #else
    2870  #define VMA_ASSERT(expr)
    2871  #endif
    2872 #endif
    2873 
    2874 // Assert that will be called very often, like inside data structures e.g. operator[].
    2875 // Making it non-empty can make program slow.
    2876 #ifndef VMA_HEAVY_ASSERT
    2877  #ifdef _DEBUG
    2878  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2879  #else
    2880  #define VMA_HEAVY_ASSERT(expr)
    2881  #endif
    2882 #endif
    2883 
    2884 #ifndef VMA_ALIGN_OF
    2885  #define VMA_ALIGN_OF(type) (__alignof(type))
    2886 #endif
    2887 
    2888 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2889  #if defined(_WIN32)
    2890  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2891  #else
    2892  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2893  #endif
    2894 #endif
    2895 
    2896 #ifndef VMA_SYSTEM_FREE
    2897  #if defined(_WIN32)
    2898  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2899  #else
    2900  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2901  #endif
    2902 #endif
    2903 
    2904 #ifndef VMA_MIN
    2905  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2906 #endif
    2907 
    2908 #ifndef VMA_MAX
    2909  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2910 #endif
    2911 
    2912 #ifndef VMA_SWAP
    2913  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2914 #endif
    2915 
    2916 #ifndef VMA_SORT
    2917  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2918 #endif
    2919 
    2920 #ifndef VMA_DEBUG_LOG
    2921  #define VMA_DEBUG_LOG(format, ...)
    2922  /*
    2923  #define VMA_DEBUG_LOG(format, ...) do { \
    2924  printf(format, __VA_ARGS__); \
    2925  printf("\n"); \
    2926  } while(false)
    2927  */
    2928 #endif
    2929 
    2930 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2931 #if VMA_STATS_STRING_ENABLED
    2932  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2933  {
    2934  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2935  }
    2936  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2937  {
    2938  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2939  }
    2940  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2941  {
    2942  snprintf(outStr, strLen, "%p", ptr);
    2943  }
    2944 #endif
    2945 
    2946 #ifndef VMA_MUTEX
    2947  class VmaMutex
    2948  {
    2949  public:
    2950  VmaMutex() { }
    2951  ~VmaMutex() { }
    2952  void Lock() { m_Mutex.lock(); }
    2953  void Unlock() { m_Mutex.unlock(); }
    2954  private:
    2955  std::mutex m_Mutex;
    2956  };
    2957  #define VMA_MUTEX VmaMutex
    2958 #endif
    2959 
    2960 /*
    2961 If providing your own implementation, you need to implement a subset of std::atomic:
    2962 
    2963 - Constructor(uint32_t desired)
    2964 - uint32_t load() const
    2965 - void store(uint32_t desired)
    2966 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2967 */
    2968 #ifndef VMA_ATOMIC_UINT32
    2969  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2970 #endif
    2971 
    2972 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2973 
    2977  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2978 #endif
    2979 
    2980 #ifndef VMA_DEBUG_ALIGNMENT
    2981 
    2985  #define VMA_DEBUG_ALIGNMENT (1)
    2986 #endif
    2987 
    2988 #ifndef VMA_DEBUG_MARGIN
    2989 
    2993  #define VMA_DEBUG_MARGIN (0)
    2994 #endif
    2995 
    2996 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2997 
    3001  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3002 #endif
    3003 
    3004 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3005 
    3010  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3011 #endif
    3012 
    3013 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3014 
    3018  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3019 #endif
    3020 
    3021 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3022 
    3026  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3027 #endif
    3028 
    3029 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3030  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3032 #endif
    3033 
    3034 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3035  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3037 #endif
    3038 
    3039 #ifndef VMA_CLASS_NO_COPY
    3040  #define VMA_CLASS_NO_COPY(className) \
    3041  private: \
    3042  className(const className&) = delete; \
    3043  className& operator=(const className&) = delete;
    3044 #endif
    3045 
    3046 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3047 
    3048 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3049 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3050 
    3051 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3052 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3053 
    3054 /*******************************************************************************
    3055 END OF CONFIGURATION
    3056 */
    3057 
    3058 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3059  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3060 
    3061 // Returns number of bits set to 1 in (v).
    3062 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3063 {
    3064  uint32_t c = v - ((v >> 1) & 0x55555555);
    3065  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3066  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3067  c = ((c >> 8) + c) & 0x00FF00FF;
    3068  c = ((c >> 16) + c) & 0x0000FFFF;
    3069  return c;
    3070 }
    3071 
    3072 // Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    3073 // Use types like uint32_t, uint64_t as T.
    3074 template <typename T>
    3075 static inline T VmaAlignUp(T val, T align)
    3076 {
    3077  return (val + align - 1) / align * align;
    3078 }
    3079 // Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
    3080 // Use types like uint32_t, uint64_t as T.
    3081 template <typename T>
    3082 static inline T VmaAlignDown(T val, T align)
    3083 {
    3084  return val / align * align;
    3085 }
    3086 
    3087 // Division with mathematical rounding to nearest number.
    3088 template <typename T>
    3089 static inline T VmaRoundDiv(T x, T y)
    3090 {
    3091  return (x + (y / (T)2)) / y;
    3092 }
    3093 
    3094 /*
    3095 Returns true if given number is a power of two.
    3096 T must be unsigned integer number or signed integer but always nonnegative.
    3097 For 0 returns true.
    3098 */
    3099 template <typename T>
    3100 inline bool VmaIsPow2(T x)
    3101 {
    3102  return (x & (x-1)) == 0;
    3103 }
    3104 
    3105 // Returns smallest power of 2 greater or equal to v.
    3106 static inline uint32_t VmaNextPow2(uint32_t v)
    3107 {
    3108  v--;
    3109  v |= v >> 1;
    3110  v |= v >> 2;
    3111  v |= v >> 4;
    3112  v |= v >> 8;
    3113  v |= v >> 16;
    3114  v++;
    3115  return v;
    3116 }
    3117 static inline uint64_t VmaNextPow2(uint64_t v)
    3118 {
    3119  v--;
    3120  v |= v >> 1;
    3121  v |= v >> 2;
    3122  v |= v >> 4;
    3123  v |= v >> 8;
    3124  v |= v >> 16;
    3125  v |= v >> 32;
    3126  v++;
    3127  return v;
    3128 }
    3129 
    3130 // Returns largest power of 2 less or equal to v.
    3131 static inline uint32_t VmaPrevPow2(uint32_t v)
    3132 {
    3133  v |= v >> 1;
    3134  v |= v >> 2;
    3135  v |= v >> 4;
    3136  v |= v >> 8;
    3137  v |= v >> 16;
    3138  v = v ^ (v >> 1);
    3139  return v;
    3140 }
    3141 static inline uint64_t VmaPrevPow2(uint64_t v)
    3142 {
    3143  v |= v >> 1;
    3144  v |= v >> 2;
    3145  v |= v >> 4;
    3146  v |= v >> 8;
    3147  v |= v >> 16;
    3148  v |= v >> 32;
    3149  v = v ^ (v >> 1);
    3150  return v;
    3151 }
    3152 
    3153 static inline bool VmaStrIsEmpty(const char* pStr)
    3154 {
    3155  return pStr == VMA_NULL || *pStr == '\0';
    3156 }
    3157 
    3158 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3159 {
    3160  switch(algorithm)
    3161  {
    3163  return "Linear";
    3165  return "Buddy";
    3166  case 0:
    3167  return "Default";
    3168  default:
    3169  VMA_ASSERT(0);
    3170  return "";
    3171  }
    3172 }
    3173 
    3174 #ifndef VMA_SORT
    3175 
    3176 template<typename Iterator, typename Compare>
    3177 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3178 {
    3179  Iterator centerValue = end; --centerValue;
    3180  Iterator insertIndex = beg;
    3181  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3182  {
    3183  if(cmp(*memTypeIndex, *centerValue))
    3184  {
    3185  if(insertIndex != memTypeIndex)
    3186  {
    3187  VMA_SWAP(*memTypeIndex, *insertIndex);
    3188  }
    3189  ++insertIndex;
    3190  }
    3191  }
    3192  if(insertIndex != centerValue)
    3193  {
    3194  VMA_SWAP(*insertIndex, *centerValue);
    3195  }
    3196  return insertIndex;
    3197 }
    3198 
    3199 template<typename Iterator, typename Compare>
    3200 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3201 {
    3202  if(beg < end)
    3203  {
    3204  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3205  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3206  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3207  }
    3208 }
    3209 
    3210 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3211 
    3212 #endif // #ifndef VMA_SORT
    3213 
    3214 /*
    3215 Returns true if two memory blocks occupy overlapping pages.
    3216 ResourceA must be at a lower memory offset than ResourceB.
    3217 
    3218 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3219 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3220 */
    3221 static inline bool VmaBlocksOnSamePage(
    3222  VkDeviceSize resourceAOffset,
    3223  VkDeviceSize resourceASize,
    3224  VkDeviceSize resourceBOffset,
    3225  VkDeviceSize pageSize)
    3226 {
    3227  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3228  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3229  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3230  VkDeviceSize resourceBStart = resourceBOffset;
    3231  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3232  return resourceAEndPage == resourceBStartPage;
    3233 }
    3234 
    3235 enum VmaSuballocationType
    3236 {
    3237  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3238  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3239  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3240  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3241  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3242  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3243  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3244 };
    3245 
    3246 /*
    3247 Returns true if given suballocation types could conflict and must respect
    3248 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3249 or linear image and another one is optimal image. If type is unknown, behave
    3250 conservatively.
    3251 */
    3252 static inline bool VmaIsBufferImageGranularityConflict(
    3253  VmaSuballocationType suballocType1,
    3254  VmaSuballocationType suballocType2)
    3255 {
    3256  if(suballocType1 > suballocType2)
    3257  {
    3258  VMA_SWAP(suballocType1, suballocType2);
    3259  }
    3260 
    3261  switch(suballocType1)
    3262  {
    3263  case VMA_SUBALLOCATION_TYPE_FREE:
    3264  return false;
    3265  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3266  return true;
    3267  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3268  return
    3269  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3271  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3272  return
    3273  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3274  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3275  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3276  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3277  return
    3278  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3279  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3280  return false;
    3281  default:
    3282  VMA_ASSERT(0);
    3283  return true;
    3284  }
    3285 }
    3286 
    3287 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3288 {
    3289  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3290  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3291  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3292  {
    3293  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3294  }
    3295 }
    3296 
    3297 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3298 {
    3299  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3300  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3301  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3302  {
    3303  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3304  {
    3305  return false;
    3306  }
    3307  }
    3308  return true;
    3309 }
    3310 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false the whole object is a no-op (m_pMutex stays null),
    // which lets callers disable synchronization without branching at call sites.
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex; // Null when locking is disabled.
};
    3336 
    3337 #if VMA_DEBUG_GLOBAL_MUTEX
    3338  static VMA_MUTEX gDebugGlobalMutex;
    3339  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3340 #else
    3341  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3342 #endif
    3343 
    3344 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3345 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3346 
    3347 /*
    3348 Performs binary search and returns iterator to first element that is greater or
    3349 equal to (key), according to comparison (cmp).
    3350 
    3351 Cmp should return true if first argument is less than second argument.
    3352 
    3353 Returned value is the found element, if present in the collection or place where
    3354 new element with value (key) should be inserted.
    3355 */
    3356 template <typename CmpLess, typename IterT, typename KeyT>
    3357 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3358 {
    3359  size_t down = 0, up = (end - beg);
    3360  while(down < up)
    3361  {
    3362  const size_t mid = (down + up) / 2;
    3363  if(cmp(*(beg+mid), key))
    3364  {
    3365  down = mid + 1;
    3366  }
    3367  else
    3368  {
    3369  up = mid;
    3370  }
    3371  }
    3372  return beg + down;
    3373 }
    3374 
    3376 // Memory allocation
    3377 
    3378 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3379 {
    3380  if((pAllocationCallbacks != VMA_NULL) &&
    3381  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3382  {
    3383  return (*pAllocationCallbacks->pfnAllocation)(
    3384  pAllocationCallbacks->pUserData,
    3385  size,
    3386  alignment,
    3387  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3388  }
    3389  else
    3390  {
    3391  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3392  }
    3393 }
    3394 
    3395 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3396 {
    3397  if((pAllocationCallbacks != VMA_NULL) &&
    3398  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3399  {
    3400  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3401  }
    3402  else
    3403  {
    3404  VMA_SYSTEM_FREE(ptr);
    3405  }
    3406 }
    3407 
    3408 template<typename T>
    3409 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3410 {
    3411  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3412 }
    3413 
    3414 template<typename T>
    3415 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3416 {
    3417  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3418 }
    3419 
// Allocates via callbacks and constructs a single object in place (placement new).
#define vma_new(allocator, type)   new(VmaAllocate<type>(allocator))(type)

// Allocates storage for (count) objects; placement new constructs only the first
// element - the remaining storage is left uninitialized.
#define vma_new_array(allocator, type, count)   new(VmaAllocateArray<type>((allocator), (count)))(type)
    3423 
// Destroys and frees a single object previously created with vma_new.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3430 
    3431 template<typename T>
    3432 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3433 {
    3434  if(ptr != VMA_NULL)
    3435  {
    3436  for(size_t i = count; i--; )
    3437  {
    3438  ptr[i].~T();
    3439  }
    3440  VmaFree(pAllocationCallbacks, ptr);
    3441  }
    3442 }
    3443 
// STL-compatible allocator.
// Forwards all allocations/deallocations to VkAllocationCallbacks so that
// STL-style containers used internally honor the user-provided callbacks.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding converting constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators compare equal iff they use the same callbacks pointer.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3471 
    3472 #if VMA_USE_STL_VECTOR
    3473 
    3474 #define VmaVector std::vector
    3475 
// Inserts item at given index - thin wrapper to give std::vector the same
// index-based insert interface as VmaVector.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
    3481 
// Removes element at given index - thin wrapper matching VmaVector's interface.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
    3487 
    3488 #else // #if VMA_USE_STL_VECTOR
    3489 
    3490 /* Class with interface compatible with subset of std::vector.
    3491 T must be POD because constructors and destructors are not called and memcpy is
    3492 used for these objects. */
    3493 template<typename T, typename AllocatorT>
    3494 class VmaVector
    3495 {
    3496 public:
    3497  typedef T value_type;
    3498 
    3499  VmaVector(const AllocatorT& allocator) :
    3500  m_Allocator(allocator),
    3501  m_pArray(VMA_NULL),
    3502  m_Count(0),
    3503  m_Capacity(0)
    3504  {
    3505  }
    3506 
    3507  VmaVector(size_t count, const AllocatorT& allocator) :
    3508  m_Allocator(allocator),
    3509  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3510  m_Count(count),
    3511  m_Capacity(count)
    3512  {
    3513  }
    3514 
    3515  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3516  m_Allocator(src.m_Allocator),
    3517  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3518  m_Count(src.m_Count),
    3519  m_Capacity(src.m_Count)
    3520  {
    3521  if(m_Count != 0)
    3522  {
    3523  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3524  }
    3525  }
    3526 
    3527  ~VmaVector()
    3528  {
    3529  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3530  }
    3531 
    3532  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3533  {
    3534  if(&rhs != this)
    3535  {
    3536  resize(rhs.m_Count);
    3537  if(m_Count != 0)
    3538  {
    3539  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3540  }
    3541  }
    3542  return *this;
    3543  }
    3544 
    3545  bool empty() const { return m_Count == 0; }
    3546  size_t size() const { return m_Count; }
    3547  T* data() { return m_pArray; }
    3548  const T* data() const { return m_pArray; }
    3549 
    3550  T& operator[](size_t index)
    3551  {
    3552  VMA_HEAVY_ASSERT(index < m_Count);
    3553  return m_pArray[index];
    3554  }
    3555  const T& operator[](size_t index) const
    3556  {
    3557  VMA_HEAVY_ASSERT(index < m_Count);
    3558  return m_pArray[index];
    3559  }
    3560 
    3561  T& front()
    3562  {
    3563  VMA_HEAVY_ASSERT(m_Count > 0);
    3564  return m_pArray[0];
    3565  }
    3566  const T& front() const
    3567  {
    3568  VMA_HEAVY_ASSERT(m_Count > 0);
    3569  return m_pArray[0];
    3570  }
    3571  T& back()
    3572  {
    3573  VMA_HEAVY_ASSERT(m_Count > 0);
    3574  return m_pArray[m_Count - 1];
    3575  }
    3576  const T& back() const
    3577  {
    3578  VMA_HEAVY_ASSERT(m_Count > 0);
    3579  return m_pArray[m_Count - 1];
    3580  }
    3581 
    3582  void reserve(size_t newCapacity, bool freeMemory = false)
    3583  {
    3584  newCapacity = VMA_MAX(newCapacity, m_Count);
    3585 
    3586  if((newCapacity < m_Capacity) && !freeMemory)
    3587  {
    3588  newCapacity = m_Capacity;
    3589  }
    3590 
    3591  if(newCapacity != m_Capacity)
    3592  {
    3593  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3594  if(m_Count != 0)
    3595  {
    3596  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3597  }
    3598  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3599  m_Capacity = newCapacity;
    3600  m_pArray = newArray;
    3601  }
    3602  }
    3603 
    3604  void resize(size_t newCount, bool freeMemory = false)
    3605  {
    3606  size_t newCapacity = m_Capacity;
    3607  if(newCount > m_Capacity)
    3608  {
    3609  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3610  }
    3611  else if(freeMemory)
    3612  {
    3613  newCapacity = newCount;
    3614  }
    3615 
    3616  if(newCapacity != m_Capacity)
    3617  {
    3618  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3619  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3620  if(elementsToCopy != 0)
    3621  {
    3622  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3623  }
    3624  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3625  m_Capacity = newCapacity;
    3626  m_pArray = newArray;
    3627  }
    3628 
    3629  m_Count = newCount;
    3630  }
    3631 
    3632  void clear(bool freeMemory = false)
    3633  {
    3634  resize(0, freeMemory);
    3635  }
    3636 
    3637  void insert(size_t index, const T& src)
    3638  {
    3639  VMA_HEAVY_ASSERT(index <= m_Count);
    3640  const size_t oldCount = size();
    3641  resize(oldCount + 1);
    3642  if(index < oldCount)
    3643  {
    3644  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3645  }
    3646  m_pArray[index] = src;
    3647  }
    3648 
    3649  void remove(size_t index)
    3650  {
    3651  VMA_HEAVY_ASSERT(index < m_Count);
    3652  const size_t oldCount = size();
    3653  if(index < oldCount - 1)
    3654  {
    3655  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3656  }
    3657  resize(oldCount - 1);
    3658  }
    3659 
    3660  void push_back(const T& src)
    3661  {
    3662  const size_t newIndex = size();
    3663  resize(newIndex + 1);
    3664  m_pArray[newIndex] = src;
    3665  }
    3666 
    3667  void pop_back()
    3668  {
    3669  VMA_HEAVY_ASSERT(m_Count > 0);
    3670  resize(size() - 1);
    3671  }
    3672 
    3673  void push_front(const T& src)
    3674  {
    3675  insert(0, src);
    3676  }
    3677 
    3678  void pop_front()
    3679  {
    3680  VMA_HEAVY_ASSERT(m_Count > 0);
    3681  remove(0);
    3682  }
    3683 
    3684  typedef T* iterator;
    3685 
    3686  iterator begin() { return m_pArray; }
    3687  iterator end() { return m_pArray + m_Count; }
    3688 
    3689 private:
    3690  AllocatorT m_Allocator;
    3691  T* m_pArray;
    3692  size_t m_Count;
    3693  size_t m_Capacity;
    3694 };
    3695 
// Inserts item at given index - mirrors the std::vector overload above so
// generic code can use either container type.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3701 
// Removes element at given index - mirrors the std::vector overload above.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3707 
    3708 #endif // #if VMA_USE_STL_VECTOR
    3709 
    3710 template<typename CmpLess, typename VectorT>
    3711 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3712 {
    3713  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3714  vector.data(),
    3715  vector.data() + vector.size(),
    3716  value,
    3717  CmpLess()) - vector.data();
    3718  VmaVectorInsert(vector, indexToInsert, value);
    3719  return indexToInsert;
    3720 }
    3721 
    3722 template<typename CmpLess, typename VectorT>
    3723 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3724 {
    3725  CmpLess comparator;
    3726  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3727  vector.begin(),
    3728  vector.end(),
    3729  value,
    3730  comparator);
    3731  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3732  {
    3733  size_t indexToRemove = it - vector.begin();
    3734  VmaVectorRemove(vector, indexToRemove);
    3735  return true;
    3736  }
    3737  return false;
    3738 }
    3739 
    3740 template<typename CmpLess, typename IterT, typename KeyT>
    3741 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3742 {
    3743  CmpLess comparator;
    3744  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3745  beg, end, value, comparator);
    3746  if(it == end ||
    3747  (!comparator(*it, value) && !comparator(value, *it)))
    3748  {
    3749  return it;
    3750  }
    3751  return end;
    3752 }
    3753 
    3755 // class VmaPoolAllocator
    3756 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();      // Frees all blocks; any outstanding pointers become invalid.
    T* Alloc();        // Returns storage for one T (constructor NOT called).
    void Free(T* ptr); // Returns a slot obtained from Alloc back to its block.

private:
    // Each slot either holds a live T or, while free, the index of the next
    // free slot in the same block (intrusive free list).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One fixed-size array of Items plus the head index of its free list.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3792 
// Initializes an empty pool. Blocks of itemsPerBlock slots are created lazily
// on first Alloc; itemsPerBlock must be positive.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3801 
// Releases all blocks. Destructors of stored T objects are NOT called.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3807 
    3808 template<typename T>
    3809 void VmaPoolAllocator<T>::Clear()
    3810 {
    3811  for(size_t i = m_ItemBlocks.size(); i--; )
    3812  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3813  m_ItemBlocks.clear();
    3814 }
    3815 
    3816 template<typename T>
    3817 T* VmaPoolAllocator<T>::Alloc()
    3818 {
    3819  for(size_t i = m_ItemBlocks.size(); i--; )
    3820  {
    3821  ItemBlock& block = m_ItemBlocks[i];
    3822  // This block has some free items: Use first one.
    3823  if(block.FirstFreeIndex != UINT32_MAX)
    3824  {
    3825  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3826  block.FirstFreeIndex = pItem->NextFreeIndex;
    3827  return &pItem->Value;
    3828  }
    3829  }
    3830 
    3831  // No block has free item: Create new one and use it.
    3832  ItemBlock& newBlock = CreateNewBlock();
    3833  Item* const pItem = &newBlock.pItems[0];
    3834  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3835  return &pItem->Value;
    3836 }
    3837 
// Returns a slot previously handed out by Alloc() back to the free list of the
// block that owns it. Asserts if ptr was not allocated from this pool.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union. memcpy of the pointer value avoids type-punning /
        // strict-aliasing issues when reinterpreting T* as Item*.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            // Push the slot onto this block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    3861 
// Allocates a new block of m_ItemsPerBlock slots, registers it, and threads all
// of its slots into a free list. Returns a reference to the stored block.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // Note: newBlock is a local copy, but its pItems aliases the array of the
    // element just pushed, so writes below initialize the stored block too.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX; // End of free list.
    return m_ItemBlocks.back();
}
    3876 
    3878 // class VmaRawList, VmaList
    3879 
    3880 #if VMA_USE_STL_LIST
    3881 
    3882 #define VmaList std::list
    3883 
    3884 #else // #if VMA_USE_STL_LIST
    3885 
// Node of the doubly linked list VmaRawList.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Previous node, or null if this is the first node.
    VmaListItem* pNext; // Next node, or null if this is the last node.
    T Value;
};
    3893 
// Doubly linked list.
// Nodes come from an internal VmaPoolAllocator; exposes raw ItemType pointers
// rather than iterators (VmaList below wraps this with an STL-like interface).
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Overloads without a value leave the new node's Value uninitialized.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool the nodes are drawn from.
    ItemType* m_pFront; // First node or null when empty.
    ItemType* m_pBack;  // Last node or null when empty.
    size_t m_Count;     // Number of elements.
};
    3938 
// Creates an empty list. The node pool allocates 128 items per block.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3948 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's own destructor releases all node storage at once.
}
    3955 
    3956 template<typename T>
    3957 void VmaRawList<T>::Clear()
    3958 {
    3959  if(IsEmpty() == false)
    3960  {
    3961  ItemType* pItem = m_pBack;
    3962  while(pItem != VMA_NULL)
    3963  {
    3964  ItemType* const pPrevItem = pItem->pPrev;
    3965  m_ItemAllocator.Free(pItem);
    3966  pItem = pPrevItem;
    3967  }
    3968  m_pFront = VMA_NULL;
    3969  m_pBack = VMA_NULL;
    3970  m_Count = 0;
    3971  }
    3972 }
    3973 
    3974 template<typename T>
    3975 VmaListItem<T>* VmaRawList<T>::PushBack()
    3976 {
    3977  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3978  pNewItem->pNext = VMA_NULL;
    3979  if(IsEmpty())
    3980  {
    3981  pNewItem->pPrev = VMA_NULL;
    3982  m_pFront = pNewItem;
    3983  m_pBack = pNewItem;
    3984  m_Count = 1;
    3985  }
    3986  else
    3987  {
    3988  pNewItem->pPrev = m_pBack;
    3989  m_pBack->pNext = pNewItem;
    3990  m_pBack = pNewItem;
    3991  ++m_Count;
    3992  }
    3993  return pNewItem;
    3994 }
    3995 
    3996 template<typename T>
    3997 VmaListItem<T>* VmaRawList<T>::PushFront()
    3998 {
    3999  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4000  pNewItem->pPrev = VMA_NULL;
    4001  if(IsEmpty())
    4002  {
    4003  pNewItem->pNext = VMA_NULL;
    4004  m_pFront = pNewItem;
    4005  m_pBack = pNewItem;
    4006  m_Count = 1;
    4007  }
    4008  else
    4009  {
    4010  pNewItem->pNext = m_pFront;
    4011  m_pFront->pPrev = pNewItem;
    4012  m_pFront = pNewItem;
    4013  ++m_Count;
    4014  }
    4015  return pNewItem;
    4016 }
    4017 
    4018 template<typename T>
    4019 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4020 {
    4021  ItemType* const pNewItem = PushBack();
    4022  pNewItem->Value = value;
    4023  return pNewItem;
    4024 }
    4025 
    4026 template<typename T>
    4027 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4028 {
    4029  ItemType* const pNewItem = PushFront();
    4030  pNewItem->Value = value;
    4031  return pNewItem;
    4032 }
    4033 
    4034 template<typename T>
    4035 void VmaRawList<T>::PopBack()
    4036 {
    4037  VMA_HEAVY_ASSERT(m_Count > 0);
    4038  ItemType* const pBackItem = m_pBack;
    4039  ItemType* const pPrevItem = pBackItem->pPrev;
    4040  if(pPrevItem != VMA_NULL)
    4041  {
    4042  pPrevItem->pNext = VMA_NULL;
    4043  }
    4044  m_pBack = pPrevItem;
    4045  m_ItemAllocator.Free(pBackItem);
    4046  --m_Count;
    4047 }
    4048 
    4049 template<typename T>
    4050 void VmaRawList<T>::PopFront()
    4051 {
    4052  VMA_HEAVY_ASSERT(m_Count > 0);
    4053  ItemType* const pFrontItem = m_pFront;
    4054  ItemType* const pNextItem = pFrontItem->pNext;
    4055  if(pNextItem != VMA_NULL)
    4056  {
    4057  pNextItem->pPrev = VMA_NULL;
    4058  }
    4059  m_pFront = pNextItem;
    4060  m_ItemAllocator.Free(pFrontItem);
    4061  --m_Count;
    4062 }
    4063 
    4064 template<typename T>
    4065 void VmaRawList<T>::Remove(ItemType* pItem)
    4066 {
    4067  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4068  VMA_HEAVY_ASSERT(m_Count > 0);
    4069 
    4070  if(pItem->pPrev != VMA_NULL)
    4071  {
    4072  pItem->pPrev->pNext = pItem->pNext;
    4073  }
    4074  else
    4075  {
    4076  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4077  m_pFront = pItem->pNext;
    4078  }
    4079 
    4080  if(pItem->pNext != VMA_NULL)
    4081  {
    4082  pItem->pNext->pPrev = pItem->pPrev;
    4083  }
    4084  else
    4085  {
    4086  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4087  m_pBack = pItem->pPrev;
    4088  }
    4089 
    4090  m_ItemAllocator.Free(pItem);
    4091  --m_Count;
    4092 }
    4093 
    4094 template<typename T>
    4095 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4096 {
    4097  if(pItem != VMA_NULL)
    4098  {
    4099  ItemType* const prevItem = pItem->pPrev;
    4100  ItemType* const newItem = m_ItemAllocator.Alloc();
    4101  newItem->pPrev = prevItem;
    4102  newItem->pNext = pItem;
    4103  pItem->pPrev = newItem;
    4104  if(prevItem != VMA_NULL)
    4105  {
    4106  prevItem->pNext = newItem;
    4107  }
    4108  else
    4109  {
    4110  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4111  m_pFront = newItem;
    4112  }
    4113  ++m_Count;
    4114  return newItem;
    4115  }
    4116  else
    4117  return PushBack();
    4118 }
    4119 
    4120 template<typename T>
    4121 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4122 {
    4123  if(pItem != VMA_NULL)
    4124  {
    4125  ItemType* const nextItem = pItem->pNext;
    4126  ItemType* const newItem = m_ItemAllocator.Alloc();
    4127  newItem->pNext = nextItem;
    4128  newItem->pPrev = pItem;
    4129  pItem->pNext = newItem;
    4130  if(nextItem != VMA_NULL)
    4131  {
    4132  nextItem->pPrev = newItem;
    4133  }
    4134  else
    4135  {
    4136  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4137  m_pBack = newItem;
    4138  }
    4139  ++m_Count;
    4140  return newItem;
    4141  }
    4142  else
    4143  return PushFront();
    4144 }
    4145 
    4146 template<typename T>
    4147 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4148 {
    4149  ItemType* const newItem = InsertBefore(pItem);
    4150  newItem->Value = value;
    4151  return newItem;
    4152 }
    4153 
    4154 template<typename T>
    4155 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4156 {
    4157  ItemType* const newItem = InsertAfter(pItem);
    4158  newItem->Value = value;
    4159  return newItem;
    4160 }
    4161 
// std::list-like wrapper over VmaRawList that adds STL-style (const_)iterators,
// so the list can be used with iterator-based code. AllocatorT must expose
// m_pCallbacks (VmaStlAllocator).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Bidirectional mutable iterator. A null m_pItem represents end().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Decrementing end() (null item) yields the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparisons assert both iterators come from the same list.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null represents end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart; implicitly constructible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Decrementing cend() (null item) yields the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null represents cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before 'it' (insert at end() appends), like std::list::insert.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4346 
    4347 #endif // #if VMA_USE_STL_LIST
    4348 
    4350 // class VmaMap
    4351 
    4352 // Unused in this version.
    4353 #if 0
    4354 
    4355 #if VMA_USE_STL_UNORDERED_MAP
    4356 
    4357 #define VmaPair std::pair
    4358 
    4359 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4360  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4361 
    4362 #else // #if VMA_USE_STL_UNORDERED_MAP
    4363 
// Minimal std::pair replacement used when VMA_USE_STL_UNORDERED_MAP is disabled.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4373 
    4374 /* Class compatible with subset of interface of std::unordered_map.
    4375 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4376 */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the backing vector; invalidated on insert/erase.
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Backing storage: vector of pairs kept sorted by key (see insert()).
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4396 
    4397 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4398 
// Comparator ordering VmaPair objects (or a pair against a bare key) by pair.first.
// Both overloads are needed for binary search with a key of type FirstT.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4411 
// Inserts pair, keeping m_Vector sorted by key.
// Position is found by binary search, so insert is O(log n) search + O(n) shift.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4422 
// Binary-searches the sorted vector for `key`.
// Returns iterator (pointer) to the matching element, or end() if not present.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    // VmaBinaryFindFirstNotLess returns the first element not less than key,
    // which may still differ from key - verify equality before returning.
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4440 
// Removes the element pointed to by `it`. Iterator must be valid (from find()).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4446 
    4447 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4448 
    4449 #endif // #if 0
    4450 
    4452 
class VmaDeviceMemoryBlock;

// Operation to perform on a mapped memory range: flush (host write -> device)
// or invalidate (device write -> host).
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4456 
/*
Represents a single memory allocation.

An allocation starts in type NONE and is initialized exactly once via
InitBlockAllocation() (suballocation inside a VmaDeviceMemoryBlock),
InitLost() (a lost block allocation), or InitDedicatedAllocation()
(owns its own private VkDeviceMemory). Type-specific state lives in the
union at the bottom of the struct.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit in m_MapCount marking allocations created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // Set when m_pUserData points to a string owned (and freed) by this allocation.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Persistent-map bit is allowed to remain; any user map count is an error.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns this NONE allocation into a BLOCK allocation placed at `offset` inside `block`.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Turns this NONE allocation into a BLOCK allocation that is already lost.
    // Caller must have set LastUseFrameIndex to VMA_FRAME_INDEX_LOST beforehand.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for BLOCK allocations.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Weak CAS on the atomic frame index; callers retry in a loop (weak may fail spuriously).
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics of this single DEDICATED allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // No unused ranges: min stays at its sentinel, max at 0.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records usage flags of the buffer/image bound to this allocation. Call at most once.
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4675 
    4676 /*
    4677 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    4678 allocated memory block or free.
    4679 */
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    // Allocation occupying this range; NOTE(review): presumably null for FREE ranges - confirm.
    VmaAllocation hAllocation;
    VmaSuballocationType type;
};
    4687 
    4688 // Comparator for offsets.
    4689 struct VmaSuballocationOffsetLess
    4690 {
    4691  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4692  {
    4693  return lhs.offset < rhs.offset;
    4694  }
    4695 };
    4696 struct VmaSuballocationOffsetGreater
    4697 {
    4698  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4699  {
    4700  return lhs.offset > rhs.offset;
    4701  }
    4702 };
    4703 
// Doubly-linked list of suballocations within one block, ordered by offset.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4708 
    4709 /*
    4710 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4711 
    4712 If canMakeOtherLost was false:
    4713 - item points to a FREE suballocation.
    4714 - itemsToMakeLostCount is 0.
    4715 
    4716 If canMakeOtherLost was true:
    4717 - item points to first of sequence of suballocations, which are either FREE,
    4718  or point to VmaAllocations that can become lost.
    4719 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4720  the requested allocation to succeed.
    4721 */
// Describes a planned (not yet executed) allocation inside a VmaBlockMetadata.
// See the comment above for the meaning of `item` and `itemsToMakeLostCount`.
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData;

    // Cost heuristic used to choose between candidate requests:
    // bytes that would be lost plus a fixed penalty per lost allocation.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4736 
    4737 /*
    4738 Data structure used for bookkeeping of allocations and unused ranges of memory
    4739 in a single VkDeviceMemory block.
    4740 */
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation: not supported by this algorithm.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for derived classes' PrintDetailedMap() implementations.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4825 
// Helper for Validate() implementations: if cond fails, asserts in debug builds
// and makes the enclosing function return false.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4830 
/*
Default, general-purpose block metadata algorithm.
Keeps suballocations in a list ordered by offset, plus an auxiliary vector of
free suballocations sorted by size for best-fit searches.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

private:
    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4923 
    4924 /*
    4925 Allocations and their references in internal data structure look like this:
    4926 
    4927 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4928 
    4929  0 +-------+
    4930  | |
    4931  | |
    4932  | |
    4933  +-------+
    4934  | Alloc | 1st[m_1stNullItemsBeginCount]
    4935  +-------+
    4936  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4937  +-------+
    4938  | ... |
    4939  +-------+
    4940  | Alloc | 1st[1st.size() - 1]
    4941  +-------+
    4942  | |
    4943  | |
    4944  | |
    4945 GetSize() +-------+
    4946 
    4947 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4948 
    4949  0 +-------+
    4950  | Alloc | 2nd[0]
    4951  +-------+
    4952  | Alloc | 2nd[1]
    4953  +-------+
    4954  | ... |
    4955  +-------+
    4956  | Alloc | 2nd[2nd.size() - 1]
    4957  +-------+
    4958  | |
    4959  | |
    4960  | |
    4961  +-------+
    4962  | Alloc | 1st[m_1stNullItemsBeginCount]
    4963  +-------+
    4964  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4965  +-------+
    4966  | ... |
    4967  +-------+
    4968  | Alloc | 1st[1st.size() - 1]
    4969  +-------+
    4970  | |
    4971 GetSize() +-------+
    4972 
    4973 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4974 
    4975  0 +-------+
    4976  | |
    4977  | |
    4978  | |
    4979  +-------+
    4980  | Alloc | 1st[m_1stNullItemsBeginCount]
    4981  +-------+
    4982  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4983  +-------+
    4984  | ... |
    4985  +-------+
    4986  | Alloc | 1st[1st.size() - 1]
    4987  +-------+
    4988  | |
    4989  | |
    4990  | |
    4991  +-------+
    4992  | Alloc | 2nd[2nd.size() - 1]
    4993  +-------+
    4994  | ... |
    4995  +-------+
    4996  | Alloc | 2nd[1]
    4997  +-------+
    4998  | Alloc | 2nd[0]
    4999 GetSize() +-------+
    5000 
    5001 */
/*
Linear block metadata algorithm: allocations are placed one after another,
supporting stack, ring-buffer, and double-stack usage patterns.
See the ASCII diagrams in the comment above for the layout of the 1st/2nd vectors.
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolve the ping-pong indirection between the two vectors.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5100 
    5101 /*
    5102 - GetSize() is the original size of allocated memory block.
    5103 - m_UsableSize is this size aligned down to a power of two.
    5104  All allocations and calculations happen relative to m_UsableSize.
    5105 - GetUnusableSize() is the difference between them.
    5106  It is repoted as separate, unused range, not available for allocations.
    5107 
    5108 Node at level 0 has size = m_UsableSize.
    5109 Each next level contains nodes with size 2 times smaller than current level.
    5110 m_LevelCount is the maximum number of levels to use in the current object.
    5111 */
/*
Block metadata implementing the buddy allocation algorithm.

The usable size of the memory block is aligned down to a power of two
(m_UsableSize) and represented as a binary tree of nodes. Each node is either
FREE, a single ALLOCATION, or SPLIT into two equal halves ("buddies").
Free nodes are additionally linked per size level into doubly-linked free
lists (m_FreeList) via Node::free.prev/next.
Because every node size is a power of two, allocations may waste space
(internal fragmentation); per the member comment below, that waste is counted
inside m_SumFreeSize.
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    // Initializes metadata for a block of the given total size.
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Reported free space = tracked free size + the tail that became unusable
    // when the block size was aligned down to a power of two.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty when the root is a single FREE node covering the whole usable size.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    // Nodes are never split below this size, which bounds tree depth.
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    // Maximum number of size levels (and entries in m_FreeList).
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled while recursively validating the tree, compared
    // against the cached counters afterwards.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree. Size is implicit from the node's level.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        // The sibling node of equal size; merged with this one when both free.
        Node* buddy;

        // Payload depends on `type`.
        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free;
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level list of FREE nodes; level 0 is the whole usable size.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    // Smallest level whose node size still fits allocSize.
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5248 
    5249 /*
    5250 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5251 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5252 
    5253 Thread-safety: This class must be externally synchronized.
    5254 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Suballocation bookkeeping for this block; concrete algorithm is chosen
    // in Init() based on `algorithm`.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    // `count` is a number of map/unmap references to add/release; the mapping
    // itself is tracked via m_MapCount (implementation not visible here).
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write/check debug markers around a suballocation (corruption detection).
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5317 
    5318 struct VmaPointerLess
    5319 {
    5320  bool operator()(const void* lhs, const void* rhs) const
    5321  {
    5322  return lhs < rhs;
    5323  }
    5324 };
    5325 
    5326 class VmaDefragmentator;
    5327 
    5328 /*
    5329 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5330 Vulkan memory type.
    5331 
    5332 Synchronized internally with a mutex.
    5333 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount blocks so minimum capacity is guaranteed.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates from an existing block or creates a new one, respecting
    // min/max block count and the requested create flags.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates m_pDefragmentator; returns the existing one otherwise.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    bool m_HasEmptyBlock;
    // Guards m_Blocks and related state; this struct is synchronized internally.
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5446 
// Implementation behind the VmaPool handle: a custom memory pool, which is a
// thin wrapper over one VmaBlockVector plus a numeric id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned only once, after construction (m_Id must still be 0).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5469 
/*
Moves allocations between the blocks of a single VmaBlockVector to reduce
fragmentation. Allocations to consider are registered with AddAllocation();
Defragment() then performs the moves, bounded by maxBytesToMove /
maxAllocationsToMove, and the totals are exposed via GetBytesMoved() /
GetAllocationsMoved().
*/
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals reported to the caller after Defragment().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One candidate allocation plus an optional out-flag set when it is moved.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders allocations by size, largest first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state built during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when it contains more
        // allocations than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): name contains a typo ("Descecnding") but is kept as-is
        // for compatibility with existing callers elsewhere in the file.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous comparators for binary search of BlockInfo by block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // One pass of moving allocations; called until limits are hit or no move helps.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a defragmentation candidate.
    // pChanged (optional) is set when the allocation is actually moved.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5599 
    5600 #if VMA_RECORDING_ENABLED
    5601 
/*
Records VMA API calls into a file (m_File) for later offline replay/analysis.
Each Record* method corresponds to one public vmaXxx entry point; calls are
tagged with the frame index plus thread id and timestamp (CallParams).
Writes are serialized via m_FileMutex; m_UseMutex presumably disables that
locking when the user guarantees external synchronization - confirm in the
implementation.
*/
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the device so a replay can validate its environment.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Per-call metadata written alongside each record.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Formats user data for output: either the user's string or, for opaque
    // pointers, a textual pointer representation held in m_PtrStr.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;
    // NOTE(review): presumably a performance-counter frequency and start value
    // used by GetBasicParams() to compute CallParams::time - confirm there.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5701 
    5702 #endif // #if VMA_RECORDING_ENABLED
    5703 
    5704 // Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    // True when the user passed custom VkAllocationCallbacks at creation.
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    // Performs the fallible part of initialization; call right after the constructor.
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user callbacks if specified, null otherwise (use Vulkan defaults).
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device granularity, but never smaller than the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must also respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Low-level vkAllocateMemory/vkFreeMemory wrappers (heap limits, callbacks
    // are handled at this level).
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with the given byte pattern (debug feature).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5905 
    5907 // Memory allocation #2 after VmaAllocator_T definition
    5908 
// Allocates `size` bytes with the given alignment, forwarding to the
// callbacks-based VmaMalloc overload with this allocator's VkAllocationCallbacks.
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}
    5913 
// Frees memory previously obtained from VmaMalloc(hAllocator, ...), using the
// same allocator's VkAllocationCallbacks.
static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}
    5918 
// Allocates uninitialized, properly aligned storage for a single T.
// NOTE: T's constructor is NOT called here - callers must construct in place.
template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}
    5924 
// Allocates uninitialized, properly aligned storage for `count` objects of type T.
// NOTE: constructors are NOT called here - callers must construct in place.
template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    5930 
    5931 template<typename T>
    5932 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5933 {
    5934  if(ptr != VMA_NULL)
    5935  {
    5936  ptr->~T();
    5937  VmaFree(hAllocator, ptr);
    5938  }
    5939 }
    5940 
    5941 template<typename T>
    5942 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5943 {
    5944  if(ptr != VMA_NULL)
    5945  {
    5946  for(size_t i = count; i--; )
    5947  ptr[i].~T();
    5948  VmaFree(hAllocator, ptr);
    5949  }
    5950 }
    5951 
    5953 // VmaStringBuilder
    5954 
    5955 #if VMA_STATS_STRING_ENABLED
    5956 
// Growable character buffer used to build text output (e.g. statistics dumps).
// The visible Add* methods do not append a terminating '\0'; consume the
// result via GetData() together with GetLength().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5974 
    5975 void VmaStringBuilder::Add(const char* pStr)
    5976 {
    5977  const size_t strLen = strlen(pStr);
    5978  if(strLen > 0)
    5979  {
    5980  const size_t oldCount = m_Data.size();
    5981  m_Data.resize(oldCount + strLen);
    5982  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5983  }
    5984 }
    5985 
// Appends the decimal representation of a 32-bit unsigned number.
void VmaStringBuilder::AddNumber(uint32_t num)
{
    // Max 10 decimal digits for uint32_t + terminating null.
    char buf[11];
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}
    5992 
// Appends the decimal representation of a 64-bit unsigned number.
void VmaStringBuilder::AddNumber(uint64_t num)
{
    // Max 20 decimal digits for uint64_t + terminating null.
    char buf[21];
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}
    5999 
// Appends a textual representation of a pointer, formatted by VmaPtrToStr.
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
    6006 
    6007 #endif // #if VMA_STATS_STRING_ENABLED
    6008 
    6010 // VmaJsonWriter
    6011 
    6012 #if VMA_STATS_STRING_ENABLED
    6013 
// Writes JSON text into a VmaStringBuilder.
// Usage: nest BeginObject/EndObject and BeginArray/EndArray; strings may be
// emitted whole (WriteString) or built incrementally with
// BeginString/ContinueString.../EndString. Open collections are tracked on
// m_Stack; the destructor asserts that everything was properly closed.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine = true suppresses line breaks inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // One level of indentation for pretty-printed output.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // State of one currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // Stack of open collections, innermost last.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString.
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6062 
// Indentation unit written once per nesting level by WriteIndent.
const char* const VmaJsonWriter::INDENT = " ";
    6064 
// Binds the writer to its output string builder; the collection stack uses
// the caller-provided allocation callbacks.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6071 
VmaJsonWriter::~VmaJsonWriter()
{
    // A well-formed document leaves no string or collection open.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6077 
    6078 void VmaJsonWriter::BeginObject(bool singleLine)
    6079 {
    6080  VMA_ASSERT(!m_InsideString);
    6081 
    6082  BeginValue(false);
    6083  m_SB.Add('{');
    6084 
    6085  StackItem item;
    6086  item.type = COLLECTION_TYPE_OBJECT;
    6087  item.valueCount = 0;
    6088  item.singleLineMode = singleLine;
    6089  m_Stack.push_back(item);
    6090 }
    6091 
// Closes the innermost object: writes '}' on a new, one-level-less-indented line.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add('}');

    // The innermost open collection must actually be an object.
    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
    6102 
    6103 void VmaJsonWriter::BeginArray(bool singleLine)
    6104 {
    6105  VMA_ASSERT(!m_InsideString);
    6106 
    6107  BeginValue(false);
    6108  m_SB.Add('[');
    6109 
    6110  StackItem item;
    6111  item.type = COLLECTION_TYPE_ARRAY;
    6112  item.valueCount = 0;
    6113  item.singleLineMode = singleLine;
    6114  m_Stack.push_back(item);
    6115 }
    6116 
// Closes the innermost array: writes ']' on a new, one-level-less-indented line.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add(']');

    // The innermost open collection must actually be an array.
    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
    6127 
// Writes a complete quoted string value (or object key) in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6133 
    6134 void VmaJsonWriter::BeginString(const char* pStr)
    6135 {
    6136  VMA_ASSERT(!m_InsideString);
    6137 
    6138  BeginValue(true);
    6139  m_SB.Add('"');
    6140  m_InsideString = true;
    6141  if(pStr != VMA_NULL && pStr[0] != '\0')
    6142  {
    6143  ContinueString(pStr);
    6144  }
    6145 }
    6146 
    6147 void VmaJsonWriter::ContinueString(const char* pStr)
    6148 {
    6149  VMA_ASSERT(m_InsideString);
    6150 
    6151  const size_t strLen = strlen(pStr);
    6152  for(size_t i = 0; i < strLen; ++i)
    6153  {
    6154  char ch = pStr[i];
    6155  if(ch == '\\')
    6156  {
    6157  m_SB.Add("\\\\");
    6158  }
    6159  else if(ch == '"')
    6160  {
    6161  m_SB.Add("\\\"");
    6162  }
    6163  else if(ch >= 32)
    6164  {
    6165  m_SB.Add(ch);
    6166  }
    6167  else switch(ch)
    6168  {
    6169  case '\b':
    6170  m_SB.Add("\\b");
    6171  break;
    6172  case '\f':
    6173  m_SB.Add("\\f");
    6174  break;
    6175  case '\n':
    6176  m_SB.Add("\\n");
    6177  break;
    6178  case '\r':
    6179  m_SB.Add("\\r");
    6180  break;
    6181  case '\t':
    6182  m_SB.Add("\\t");
    6183  break;
    6184  default:
    6185  VMA_ASSERT(0 && "Character not currently supported.");
    6186  break;
    6187  }
    6188  }
    6189 }
    6190 
// Appends decimal representation of n to the string currently being written.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6196 
// Appends decimal representation of n to the string currently being written.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6202 
// Appends textual representation of ptr to the string currently being written.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6208 
    6209 void VmaJsonWriter::EndString(const char* pStr)
    6210 {
    6211  VMA_ASSERT(m_InsideString);
    6212  if(pStr != VMA_NULL && pStr[0] != '\0')
    6213  {
    6214  ContinueString(pStr);
    6215  }
    6216  m_SB.Add('"');
    6217  m_InsideString = false;
    6218 }
    6219 
// Writes a complete numeric value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6226 
// Writes a complete numeric value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6233 
    6234 void VmaJsonWriter::WriteBool(bool b)
    6235 {
    6236  VMA_ASSERT(!m_InsideString);
    6237  BeginValue(false);
    6238  m_SB.Add(b ? "true" : "false");
    6239 }
    6240 
// Writes the literal "null" as a complete value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6247 
// Called before any value (or key) is written. Emits whatever must precede it
// at the current position: ": " between an object key and its value, ", " plus
// indentation between consecutive entries, or just indentation for the first
// entry of a collection. No-op at top level (empty stack).
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // Inside an object, an even valueCount means a key is expected next,
        // and JSON keys must be strings.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        // Odd valueCount inside an object: this is the value following its key.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
    6276 
    6277 void VmaJsonWriter::WriteIndent(bool oneLess)
    6278 {
    6279  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6280  {
    6281  m_SB.AddNewLine();
    6282 
    6283  size_t count = m_Stack.size();
    6284  if(count > 0 && oneLess)
    6285  {
    6286  --count;
    6287  }
    6288  for(size_t i = 0; i < count; ++i)
    6289  {
    6290  m_SB.Add(INDENT);
    6291  }
    6292  }
    6293 }
    6294 
    6295 #endif // #if VMA_STATS_STRING_ENABLED
    6296 
    6298 
// Sets the allocation's user data. When IsUserDataString() is true, pUserData
// is treated as a null-terminated string: the old private copy is freed and a
// new one is allocated. Otherwise the raw pointer is stored as-is.
void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
{
    if(IsUserDataString())
    {
        // Passing back the currently stored pointer would read freed memory below.
        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);

        FreeUserDataString(hAllocator);

        if(pUserData != VMA_NULL)
        {
            const char* const newStrSrc = (char*)pUserData;
            const size_t newStrLen = strlen(newStrSrc);
            char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
            // +1 copies the terminating zero as well.
            memcpy(newStrDst, newStrSrc, newStrLen + 1);
            m_pUserData = newStrDst;
        }
    }
    else
    {
        m_pUserData = pUserData;
    }
}
    6321 
// Rebinds this block allocation to a different block/offset (used e.g. when
// the allocation is moved). Transfers any outstanding mapping references from
// the old block to the new one before switching.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistently mapped allocation holds one extra block-level reference.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6343 
// Updates the cached size of this allocation (used when it is resized in place).
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    6349 
    6350 VkDeviceSize VmaAllocation_T::GetOffset() const
    6351 {
    6352  switch(m_Type)
    6353  {
    6354  case ALLOCATION_TYPE_BLOCK:
    6355  return m_BlockAllocation.m_Offset;
    6356  case ALLOCATION_TYPE_DEDICATED:
    6357  return 0;
    6358  default:
    6359  VMA_ASSERT(0);
    6360  return 0;
    6361  }
    6362 }
    6363 
    6364 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6365 {
    6366  switch(m_Type)
    6367  {
    6368  case ALLOCATION_TYPE_BLOCK:
    6369  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6370  case ALLOCATION_TYPE_DEDICATED:
    6371  return m_DedicatedAllocation.m_hMemory;
    6372  default:
    6373  VMA_ASSERT(0);
    6374  return VK_NULL_HANDLE;
    6375  }
    6376 }
    6377 
    6378 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6379 {
    6380  switch(m_Type)
    6381  {
    6382  case ALLOCATION_TYPE_BLOCK:
    6383  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6384  case ALLOCATION_TYPE_DEDICATED:
    6385  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6386  default:
    6387  VMA_ASSERT(0);
    6388  return UINT32_MAX;
    6389  }
    6390 }
    6391 
    6392 void* VmaAllocation_T::GetMappedData() const
    6393 {
    6394  switch(m_Type)
    6395  {
    6396  case ALLOCATION_TYPE_BLOCK:
    6397  if(m_MapCount != 0)
    6398  {
    6399  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6400  VMA_ASSERT(pBlockData != VMA_NULL);
    6401  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6402  }
    6403  else
    6404  {
    6405  return VMA_NULL;
    6406  }
    6407  break;
    6408  case ALLOCATION_TYPE_DEDICATED:
    6409  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6410  return m_DedicatedAllocation.m_pMappedData;
    6411  default:
    6412  VMA_ASSERT(0);
    6413  return VMA_NULL;
    6414  }
    6415 }
    6416 
    6417 bool VmaAllocation_T::CanBecomeLost() const
    6418 {
    6419  switch(m_Type)
    6420  {
    6421  case ALLOCATION_TYPE_BLOCK:
    6422  return m_BlockAllocation.m_CanBecomeLost;
    6423  case ALLOCATION_TYPE_DEDICATED:
    6424  return false;
    6425  default:
    6426  VMA_ASSERT(0);
    6427  return false;
    6428  }
    6429 }
    6430 
// Returns the pool this block allocation belongs to. Valid only for block
// allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6436 
// Tries to atomically mark this allocation as lost. Returns true on success,
// false when the allocation was used within the last frameInUseCount frames
// relative to currentFrameIndex.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not have asked.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Used too recently - must not be made lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: another thread touched the index concurrently - loop
            // and re-evaluate. NOTE(review): correctness relies on
            // CompareExchangeLastUseFrameIndex refreshing
            // localLastUseFrameIndex on failure - verify its implementation.
        }
    }
}
    6468 
    6469 #if VMA_STATS_STRING_ENABLED
    6470 
    6471 // Correspond to values of enum VmaSuballocationType.
    6472 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    6473  "FREE",
    6474  "UNKNOWN",
    6475  "BUFFER",
    6476  "IMAGE_UNKNOWN",
    6477  "IMAGE_LINEAR",
    6478  "IMAGE_OPTIMAL",
    6479 };
    6480 
// Writes this allocation's parameters as key/value pairs into an already-open
// JSON object. Optional fields (UserData, Usage) are emitted only when set.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // User data is an owned string copy - print its content.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer - print its address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6516 
    6517 #endif
    6518 
    6519 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6520 {
    6521  VMA_ASSERT(IsUserDataString());
    6522  if(m_pUserData != VMA_NULL)
    6523  {
    6524  char* const oldStr = (char*)m_pUserData;
    6525  const size_t oldStrLen = strlen(oldStr);
    6526  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6527  m_pUserData = VMA_NULL;
    6528  }
    6529 }
    6530 
    6531 void VmaAllocation_T::BlockAllocMap()
    6532 {
    6533  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6534 
    6535  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6536  {
    6537  ++m_MapCount;
    6538  }
    6539  else
    6540  {
    6541  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6542  }
    6543 }
    6544 
    6545 void VmaAllocation_T::BlockAllocUnmap()
    6546 {
    6547  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6548 
    6549  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6550  {
    6551  --m_MapCount;
    6552  }
    6553  else
    6554  {
    6555  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6556  }
    6557 }
    6558 
// Maps this dedicated allocation's memory. If already mapped, increments the
// reference counter (capped at 0x7F) and returns the cached pointer;
// otherwise calls vkMapMemory on the whole range.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            // Already mapped - reuse the cached pointer.
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First mapping - map the entire memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
    6595 
// Decrements the mapping reference counter of this dedicated allocation and
// calls vkUnmapMemory when it drops to zero. Asserts on unbalanced unmap.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Last reference gone - actually unmap.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    6616 
    6617 #if VMA_STATS_STRING_ENABLED
    6618 
// Writes a VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are emitted
// only when there is more than one allocation / unused range, since with a
// single element they would all equal the single value.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6666 
    6667 #endif // #if VMA_STATS_STRING_ENABLED
    6668 
// Comparator ordering suballocation-list iterators by suballocation size.
// The second overload compares against a plain size, enabling binary search
// (e.g. VmaBinaryFindFirstNotLess) with a VkDeviceSize key.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6684 
    6685 
    6687 // class VmaBlockMetadata
    6688 
// Base metadata object starts with zero size; actual size is set in Init().
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6694 
    6695 #if VMA_STATS_STRING_ENABLED
    6696 
// Opens the JSON object for a detailed block dump: writes summary fields and
// starts the "Suballocations" array. Must be balanced by PrintDetailedMap_End.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    6719 
// Writes one used suballocation as a single-line JSON object inside the
// "Suballocations" array: its offset plus the allocation's own parameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6733 
// Writes one free range as a single-line JSON object inside the
// "Suballocations" array, typed as FREE.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6751 
// Closes the "Suballocations" array and the object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6757 
    6758 #endif // #if VMA_STATS_STRING_ENABLED
    6759 
    6761 // class VmaBlockMetadata_Generic
    6762 
// Generic metadata starts empty; the free list and counters are populated in Init().
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6771 
// Containers clean up after themselves; nothing to release explicitly.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6775 
// Initializes metadata for a freshly created block: the entire block becomes
// one free suballocation, registered in the by-size free list.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // The whole-block free range is large enough to be tracked by size.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    6795 
// Full consistency check of this block's metadata: walks the suballocation
// list recomputing offsets, free counts and free sizes, and cross-checks the
// by-size free list. Returns true when everything is consistent (VMA_VALIDATE
// returns false from this function on the first violated condition).
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free ranges carry no allocation handle; used ranges must carry one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only ranges at least this large are tracked in the by-size list.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with this list entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculacted values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6877 
    6878 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6879 {
    6880  if(!m_FreeSuballocationsBySize.empty())
    6881  {
    6882  return m_FreeSuballocationsBySize.back()->size;
    6883  }
    6884  else
    6885  {
    6886  return 0;
    6887  }
    6888 }
    6889 
// The block is empty when its only suballocation is the single free range
// covering the whole block.
bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}
    6894 
// Fills outInfo with statistics for this single block: counts and byte totals
// from cached members, min/max range sizes from a full list traversal.
// Note: outInfo.allocationSizeAvg/unusedRangeSizeAvg are not set here.
void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    // Start min at max value / max at 0 so the first sample overwrites them.
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}
    6928 
// Accumulates this block's contribution into pool-wide statistics.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    // Pool-wide maximum is the max over all blocks.
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
    6939 
    6940 #if VMA_STATS_STRING_ENABLED
    6941 
    6942 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6943 {
    6944  PrintDetailedMap_Begin(json,
    6945  m_SumFreeSize, // unusedBytes
    6946  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6947  m_FreeCount); // unusedRangeCount
    6948 
    6949  size_t i = 0;
    6950  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6951  suballocItem != m_Suballocations.cend();
    6952  ++suballocItem, ++i)
    6953  {
    6954  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6955  {
    6956  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6957  }
    6958  else
    6959  {
    6960  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6961  }
    6962  }
    6963 
    6964  PrintDetailedMap_End(json);
    6965 }
    6966 
    6967 #endif // #if VMA_STATS_STRING_ENABLED
    6968 
    6969 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6970  uint32_t currentFrameIndex,
    6971  uint32_t frameInUseCount,
    6972  VkDeviceSize bufferImageGranularity,
    6973  VkDeviceSize allocSize,
    6974  VkDeviceSize allocAlignment,
    6975  bool upperAddress,
    6976  VmaSuballocationType allocType,
    6977  bool canMakeOtherLost,
    6978  uint32_t strategy,
    6979  VmaAllocationRequest* pAllocationRequest)
    6980 {
    6981  VMA_ASSERT(allocSize > 0);
    6982  VMA_ASSERT(!upperAddress);
    6983  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6984  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6985  VMA_HEAVY_ASSERT(Validate());
    6986 
    6987  // There is not enough total free space in this block to fullfill the request: Early return.
    6988  if(canMakeOtherLost == false &&
    6989  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6990  {
    6991  return false;
    6992  }
    6993 
    6994  // New algorithm, efficiently searching freeSuballocationsBySize.
    6995  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6996  if(freeSuballocCount > 0)
    6997  {
    6999  {
    7000  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7001  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7002  m_FreeSuballocationsBySize.data(),
    7003  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7004  allocSize + 2 * VMA_DEBUG_MARGIN,
    7005  VmaSuballocationItemSizeLess());
    7006  size_t index = it - m_FreeSuballocationsBySize.data();
    7007  for(; index < freeSuballocCount; ++index)
    7008  {
    7009  if(CheckAllocation(
    7010  currentFrameIndex,
    7011  frameInUseCount,
    7012  bufferImageGranularity,
    7013  allocSize,
    7014  allocAlignment,
    7015  allocType,
    7016  m_FreeSuballocationsBySize[index],
    7017  false, // canMakeOtherLost
    7018  &pAllocationRequest->offset,
    7019  &pAllocationRequest->itemsToMakeLostCount,
    7020  &pAllocationRequest->sumFreeSize,
    7021  &pAllocationRequest->sumItemSize))
    7022  {
    7023  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7024  return true;
    7025  }
    7026  }
    7027  }
    7028  else // WORST_FIT, FIRST_FIT
    7029  {
    7030  // Search staring from biggest suballocations.
    7031  for(size_t index = freeSuballocCount; index--; )
    7032  {
    7033  if(CheckAllocation(
    7034  currentFrameIndex,
    7035  frameInUseCount,
    7036  bufferImageGranularity,
    7037  allocSize,
    7038  allocAlignment,
    7039  allocType,
    7040  m_FreeSuballocationsBySize[index],
    7041  false, // canMakeOtherLost
    7042  &pAllocationRequest->offset,
    7043  &pAllocationRequest->itemsToMakeLostCount,
    7044  &pAllocationRequest->sumFreeSize,
    7045  &pAllocationRequest->sumItemSize))
    7046  {
    7047  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7048  return true;
    7049  }
    7050  }
    7051  }
    7052  }
    7053 
    7054  if(canMakeOtherLost)
    7055  {
    7056  // Brute-force algorithm. TODO: Come up with something better.
    7057 
    7058  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7059  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7060 
    7061  VmaAllocationRequest tmpAllocRequest = {};
    7062  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7063  suballocIt != m_Suballocations.end();
    7064  ++suballocIt)
    7065  {
    7066  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7067  suballocIt->hAllocation->CanBecomeLost())
    7068  {
    7069  if(CheckAllocation(
    7070  currentFrameIndex,
    7071  frameInUseCount,
    7072  bufferImageGranularity,
    7073  allocSize,
    7074  allocAlignment,
    7075  allocType,
    7076  suballocIt,
    7077  canMakeOtherLost,
    7078  &tmpAllocRequest.offset,
    7079  &tmpAllocRequest.itemsToMakeLostCount,
    7080  &tmpAllocRequest.sumFreeSize,
    7081  &tmpAllocRequest.sumItemSize))
    7082  {
    7083  tmpAllocRequest.item = suballocIt;
    7084 
    7085  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7087  {
    7088  *pAllocationRequest = tmpAllocRequest;
    7089  }
    7090  }
    7091  }
    7092  }
    7093 
    7094  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7095  {
    7096  return true;
    7097  }
    7098  }
    7099 
    7100  return false;
    7101 }
    7102 
    7103 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    7104  uint32_t currentFrameIndex,
    7105  uint32_t frameInUseCount,
    7106  VmaAllocationRequest* pAllocationRequest)
    7107 {
    7108  while(pAllocationRequest->itemsToMakeLostCount > 0)
    7109  {
    7110  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    7111  {
    7112  ++pAllocationRequest->item;
    7113  }
    7114  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    7115  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    7116  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    7117  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7118  {
    7119  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    7120  --pAllocationRequest->itemsToMakeLostCount;
    7121  }
    7122  else
    7123  {
    7124  return false;
    7125  }
    7126  }
    7127 
    7128  VMA_HEAVY_ASSERT(Validate());
    7129  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    7130  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7131 
    7132  return true;
    7133 }
    7134 
    7135 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7136 {
    7137  uint32_t lostAllocationCount = 0;
    7138  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7139  it != m_Suballocations.end();
    7140  ++it)
    7141  {
    7142  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7143  it->hAllocation->CanBecomeLost() &&
    7144  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7145  {
    7146  it = FreeSuballocation(it);
    7147  ++lostAllocationCount;
    7148  }
    7149  }
    7150  return lostAllocationCount;
    7151 }
    7152 
    7153 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7154 {
    7155  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7156  it != m_Suballocations.end();
    7157  ++it)
    7158  {
    7159  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7160  {
    7161  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7162  {
    7163  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7164  return VK_ERROR_VALIDATION_FAILED_EXT;
    7165  }
    7166  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7167  {
    7168  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7169  return VK_ERROR_VALIDATION_FAILED_EXT;
    7170  }
    7171  }
    7172  }
    7173 
    7174  return VK_SUCCESS;
    7175 }
    7176 
    7177 void VmaBlockMetadata_Generic::Alloc(
    7178  const VmaAllocationRequest& request,
    7179  VmaSuballocationType type,
    7180  VkDeviceSize allocSize,
    7181  bool upperAddress,
    7182  VmaAllocation hAllocation)
    7183 {
    7184  VMA_ASSERT(!upperAddress);
    7185  VMA_ASSERT(request.item != m_Suballocations.end());
    7186  VmaSuballocation& suballoc = *request.item;
    7187  // Given suballocation is a free block.
    7188  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7189  // Given offset is inside this suballocation.
    7190  VMA_ASSERT(request.offset >= suballoc.offset);
    7191  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    7192  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    7193  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    7194 
    7195  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    7196  // it to become used.
    7197  UnregisterFreeSuballocation(request.item);
    7198 
    7199  suballoc.offset = request.offset;
    7200  suballoc.size = allocSize;
    7201  suballoc.type = type;
    7202  suballoc.hAllocation = hAllocation;
    7203 
    7204  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    7205  if(paddingEnd)
    7206  {
    7207  VmaSuballocation paddingSuballoc = {};
    7208  paddingSuballoc.offset = request.offset + allocSize;
    7209  paddingSuballoc.size = paddingEnd;
    7210  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7211  VmaSuballocationList::iterator next = request.item;
    7212  ++next;
    7213  const VmaSuballocationList::iterator paddingEndItem =
    7214  m_Suballocations.insert(next, paddingSuballoc);
    7215  RegisterFreeSuballocation(paddingEndItem);
    7216  }
    7217 
    7218  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    7219  if(paddingBegin)
    7220  {
    7221  VmaSuballocation paddingSuballoc = {};
    7222  paddingSuballoc.offset = request.offset - paddingBegin;
    7223  paddingSuballoc.size = paddingBegin;
    7224  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7225  const VmaSuballocationList::iterator paddingBeginItem =
    7226  m_Suballocations.insert(request.item, paddingSuballoc);
    7227  RegisterFreeSuballocation(paddingBeginItem);
    7228  }
    7229 
    7230  // Update totals.
    7231  m_FreeCount = m_FreeCount - 1;
    7232  if(paddingBegin > 0)
    7233  {
    7234  ++m_FreeCount;
    7235  }
    7236  if(paddingEnd > 0)
    7237  {
    7238  ++m_FreeCount;
    7239  }
    7240  m_SumFreeSize -= allocSize;
    7241 }
    7242 
    7243 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7244 {
    7245  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7246  suballocItem != m_Suballocations.end();
    7247  ++suballocItem)
    7248  {
    7249  VmaSuballocation& suballoc = *suballocItem;
    7250  if(suballoc.hAllocation == allocation)
    7251  {
    7252  FreeSuballocation(suballocItem);
    7253  VMA_HEAVY_ASSERT(Validate());
    7254  return;
    7255  }
    7256  }
    7257  VMA_ASSERT(0 && "Not found!");
    7258 }
    7259 
    7260 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7261 {
    7262  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7263  suballocItem != m_Suballocations.end();
    7264  ++suballocItem)
    7265  {
    7266  VmaSuballocation& suballoc = *suballocItem;
    7267  if(suballoc.offset == offset)
    7268  {
    7269  FreeSuballocation(suballocItem);
    7270  return;
    7271  }
    7272  }
    7273  VMA_ASSERT(0 && "Not found!");
    7274 }
    7275 
// Tries to change the size of the given allocation in place:
// - Shrinking always succeeds: freed bytes are returned to the following free
//   suballocation (grown backward) or to a newly created free suballocation.
// - Growing succeeds only if the immediately following suballocation is free
//   and large enough (including VMA_DEBUG_MARGIN).
// Returns true on success, false if growing is impossible.
// Asserts if the allocation is not found in this block.
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister/re-register because its size (the sort key
                        // in m_FreeSuballocationsBySize) changes.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    7402 
    7403 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7404 {
    7405  VkDeviceSize lastSize = 0;
    7406  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7407  {
    7408  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7409 
    7410  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7411  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7412  VMA_VALIDATE(it->size >= lastSize);
    7413  lastSize = it->size;
    7414  }
    7415  return true;
    7416 }
    7417 
// Checks whether an allocation with the given size, alignment, and type can
// be placed starting at the suballocation pointed by suballocItem.
//
// When canMakeOtherLost is true, the candidate region may span multiple
// suballocations; used ones are counted as "to be made lost" if eligible.
// When false, suballocItem must itself be a single free suballocation big
// enough to hold the request.
//
// On success returns true and fills:
// - *pOffset - final, aligned offset for the allocation.
// - *pSumFreeSize - sum of sizes of free suballocations involved.
// - *pSumItemSize - sum of sizes of used suballocations that would have to be
//   made lost (stays 0 when canMakeOtherLost is false).
// - *itemsToMakeLostCount - number of allocations that would be made lost.
// Returns false if the allocation cannot be made at this position.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // The starting suballocation is used: it must be losable and old
            // enough (not used within the last frameInUseCount frames).
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Bump the offset to the next granularity page to avoid the conflict.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // canMakeOtherLost == false: suballocItem must be a free suballocation
        // large enough on its own.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7691 
    7692 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7693 {
    7694  VMA_ASSERT(item != m_Suballocations.end());
    7695  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7696 
    7697  VmaSuballocationList::iterator nextItem = item;
    7698  ++nextItem;
    7699  VMA_ASSERT(nextItem != m_Suballocations.end());
    7700  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7701 
    7702  item->size += nextItem->size;
    7703  --m_FreeCount;
    7704  m_Suballocations.erase(nextItem);
    7705 }
    7706 
// Marks the given used suballocation as free, updates m_FreeCount and
// m_SumFreeSize, and merges it with adjacent free suballocations if any.
// Returns iterator to the resulting free suballocation - the previous item
// when a backward merge happened, otherwise suballocItem itself.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Forward merge first: suballocItem absorbs nextItem. nextItem was a
    // registered free item, so it must be unregistered before it is erased.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // Backward merge: prevItem absorbs suballocItem (possibly already
        // grown by the forward merge above). prevItem's size changes, so it
        // is unregistered first and re-registered afterwards.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        // No backward merge: the freshly freed item itself gets registered.
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7758 
    7759 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7760 {
    7761  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7762  VMA_ASSERT(item->size > 0);
    7763 
    7764  // You may want to enable this validation at the beginning or at the end of
    7765  // this function, depending on what do you want to check.
    7766  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7767 
    7768  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7769  {
    7770  if(m_FreeSuballocationsBySize.empty())
    7771  {
    7772  m_FreeSuballocationsBySize.push_back(item);
    7773  }
    7774  else
    7775  {
    7776  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7777  }
    7778  }
    7779 
    7780  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7781 }
    7782 
    7783 
    7784 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    7785 {
    7786  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7787  VMA_ASSERT(item->size > 0);
    7788 
    7789  // You may want to enable this validation at the beginning or at the end of
    7790  // this function, depending on what do you want to check.
    7791  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7792 
    7793  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7794  {
    7795  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7796  m_FreeSuballocationsBySize.data(),
    7797  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    7798  item,
    7799  VmaSuballocationItemSizeLess());
    7800  for(size_t index = it - m_FreeSuballocationsBySize.data();
    7801  index < m_FreeSuballocationsBySize.size();
    7802  ++index)
    7803  {
    7804  if(m_FreeSuballocationsBySize[index] == item)
    7805  {
    7806  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    7807  return;
    7808  }
    7809  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    7810  }
    7811  VMA_ASSERT(0 && "Not found.");
    7812  }
    7813 
    7814  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7815 }
    7816 
    7818 // class VmaBlockMetadata_Linear
    7819 
// Constructs empty linear metadata: both suballocation vectors are empty,
// vector 0 serves as the "1st" vector, the 2nd vector is unused, and all
// null-item counters start at zero.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7832 
// Intentionally empty - members clean up via their own destructors.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7836 
// Initializes metadata for a block of the given size; the whole block starts
// as free space.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7842 
// Checks internal consistency of the linear metadata: null-item counters
// match actual null items, offsets are strictly increasing with debug
// margins between items, allocation handles agree with stored offset/size,
// and m_SumFreeSize equals block size minus used size.
// Returns false (via VMA_VALIDATE) on the first violated invariant.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Minimum offset the next visited suballocation may start at.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector precedes the 1st in address order,
    // so it is validated first to keep `offset` monotonically increasing.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading run of the 1st vector must consist entirely of null items.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): this condition is always true - the loop starts at
        // i == m_1stNullItemsBeginCount, so `i >= m_1stNullItemsBeginCount`
        // holds for every iteration. Likely intended for a loop starting at 0.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the end of the
    // block, so it is iterated in reverse to keep `offset` increasing.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7969 
    7970 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7971 {
    7972  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7973  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7974 }
    7975 
    7976 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7977 {
    7978  const VkDeviceSize size = GetSize();
    7979 
    7980  /*
    7981  We don't consider gaps inside allocation vectors with freed allocations because
    7982  they are not suitable for reuse in linear allocator. We consider only space that
    7983  is available for new allocations.
    7984  */
    7985  if(IsEmpty())
    7986  {
    7987  return size;
    7988  }
    7989 
    7990  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7991 
    7992  switch(m_2ndVectorMode)
    7993  {
    7994  case SECOND_VECTOR_EMPTY:
    7995  /*
    7996  Available space is after end of 1st, as well as before beginning of 1st (which
    7997  whould make it a ring buffer).
    7998  */
    7999  {
    8000  const size_t suballocations1stCount = suballocations1st.size();
    8001  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8002  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8003  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8004  return VMA_MAX(
    8005  firstSuballoc.offset,
    8006  size - (lastSuballoc.offset + lastSuballoc.size));
    8007  }
    8008  break;
    8009 
    8010  case SECOND_VECTOR_RING_BUFFER:
    8011  /*
    8012  Available space is only between end of 2nd and beginning of 1st.
    8013  */
    8014  {
    8015  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8016  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8017  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8018  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8019  }
    8020  break;
    8021 
    8022  case SECOND_VECTOR_DOUBLE_STACK:
    8023  /*
    8024  Available space is only between end of 1st and top of 2nd.
    8025  */
    8026  {
    8027  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8028  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8029  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8030  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8031  }
    8032  break;
    8033 
    8034  default:
    8035  VMA_ASSERT(0);
    8036  return 0;
    8037  }
    8038 }
    8039 
    8040 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8041 {
    8042  const VkDeviceSize size = GetSize();
    8043  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8044  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8045  const size_t suballoc1stCount = suballocations1st.size();
    8046  const size_t suballoc2ndCount = suballocations2nd.size();
    8047 
    8048  outInfo.blockCount = 1;
    8049  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8050  outInfo.unusedRangeCount = 0;
    8051  outInfo.usedBytes = 0;
    8052  outInfo.allocationSizeMin = UINT64_MAX;
    8053  outInfo.allocationSizeMax = 0;
    8054  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8055  outInfo.unusedRangeSizeMax = 0;
    8056 
    8057  VkDeviceSize lastOffset = 0;
    8058 
    8059  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8060  {
    8061  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8062  size_t nextAlloc2ndIndex = 0;
    8063  while(lastOffset < freeSpace2ndTo1stEnd)
    8064  {
    8065  // Find next non-null allocation or move nextAllocIndex to the end.
    8066  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8067  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8068  {
    8069  ++nextAlloc2ndIndex;
    8070  }
    8071 
    8072  // Found non-null allocation.
    8073  if(nextAlloc2ndIndex < suballoc2ndCount)
    8074  {
    8075  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8076 
    8077  // 1. Process free space before this allocation.
    8078  if(lastOffset < suballoc.offset)
    8079  {
    8080  // There is free space from lastOffset to suballoc.offset.
    8081  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8082  ++outInfo.unusedRangeCount;
    8083  outInfo.unusedBytes += unusedRangeSize;
    8084  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8085  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8086  }
    8087 
    8088  // 2. Process this allocation.
    8089  // There is allocation with suballoc.offset, suballoc.size.
    8090  outInfo.usedBytes += suballoc.size;
    8091  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8092  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8093 
    8094  // 3. Prepare for next iteration.
    8095  lastOffset = suballoc.offset + suballoc.size;
    8096  ++nextAlloc2ndIndex;
    8097  }
    8098  // We are at the end.
    8099  else
    8100  {
    8101  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8102  if(lastOffset < freeSpace2ndTo1stEnd)
    8103  {
    8104  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8105  ++outInfo.unusedRangeCount;
    8106  outInfo.unusedBytes += unusedRangeSize;
    8107  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8108  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8109  }
    8110 
    8111  // End of loop.
    8112  lastOffset = freeSpace2ndTo1stEnd;
    8113  }
    8114  }
    8115  }
    8116 
    8117  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8118  const VkDeviceSize freeSpace1stTo2ndEnd =
    8119  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8120  while(lastOffset < freeSpace1stTo2ndEnd)
    8121  {
    8122  // Find next non-null allocation or move nextAllocIndex to the end.
    8123  while(nextAlloc1stIndex < suballoc1stCount &&
    8124  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8125  {
    8126  ++nextAlloc1stIndex;
    8127  }
    8128 
    8129  // Found non-null allocation.
    8130  if(nextAlloc1stIndex < suballoc1stCount)
    8131  {
    8132  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8133 
    8134  // 1. Process free space before this allocation.
    8135  if(lastOffset < suballoc.offset)
    8136  {
    8137  // There is free space from lastOffset to suballoc.offset.
    8138  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8139  ++outInfo.unusedRangeCount;
    8140  outInfo.unusedBytes += unusedRangeSize;
    8141  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8142  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8143  }
    8144 
    8145  // 2. Process this allocation.
    8146  // There is allocation with suballoc.offset, suballoc.size.
    8147  outInfo.usedBytes += suballoc.size;
    8148  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8149  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8150 
    8151  // 3. Prepare for next iteration.
    8152  lastOffset = suballoc.offset + suballoc.size;
    8153  ++nextAlloc1stIndex;
    8154  }
    8155  // We are at the end.
    8156  else
    8157  {
    8158  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8159  if(lastOffset < freeSpace1stTo2ndEnd)
    8160  {
    8161  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8162  ++outInfo.unusedRangeCount;
    8163  outInfo.unusedBytes += unusedRangeSize;
    8164  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8165  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8166  }
    8167 
    8168  // End of loop.
    8169  lastOffset = freeSpace1stTo2ndEnd;
    8170  }
    8171  }
    8172 
    8173  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8174  {
    8175  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8176  while(lastOffset < size)
    8177  {
    8178  // Find next non-null allocation or move nextAllocIndex to the end.
    8179  while(nextAlloc2ndIndex != SIZE_MAX &&
    8180  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8181  {
    8182  --nextAlloc2ndIndex;
    8183  }
    8184 
    8185  // Found non-null allocation.
    8186  if(nextAlloc2ndIndex != SIZE_MAX)
    8187  {
    8188  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8189 
    8190  // 1. Process free space before this allocation.
    8191  if(lastOffset < suballoc.offset)
    8192  {
    8193  // There is free space from lastOffset to suballoc.offset.
    8194  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8195  ++outInfo.unusedRangeCount;
    8196  outInfo.unusedBytes += unusedRangeSize;
    8197  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8198  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8199  }
    8200 
    8201  // 2. Process this allocation.
    8202  // There is allocation with suballoc.offset, suballoc.size.
    8203  outInfo.usedBytes += suballoc.size;
    8204  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8205  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8206 
    8207  // 3. Prepare for next iteration.
    8208  lastOffset = suballoc.offset + suballoc.size;
    8209  --nextAlloc2ndIndex;
    8210  }
    8211  // We are at the end.
    8212  else
    8213  {
    8214  // There is free space from lastOffset to size.
    8215  if(lastOffset < size)
    8216  {
    8217  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8218  ++outInfo.unusedRangeCount;
    8219  outInfo.unusedBytes += unusedRangeSize;
    8220  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8221  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8222  }
    8223 
    8224  // End of loop.
    8225  lastOffset = size;
    8226  }
    8227  }
    8228  }
    8229 
    8230  outInfo.unusedBytes = size - outInfo.usedBytes;
    8231 }
    8232 
    8233 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8234 {
    8235  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8236  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8237  const VkDeviceSize size = GetSize();
    8238  const size_t suballoc1stCount = suballocations1st.size();
    8239  const size_t suballoc2ndCount = suballocations2nd.size();
    8240 
    8241  inoutStats.size += size;
    8242 
    8243  VkDeviceSize lastOffset = 0;
    8244 
    8245  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8246  {
    8247  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8248  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8249  while(lastOffset < freeSpace2ndTo1stEnd)
    8250  {
    8251  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8252  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8253  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8254  {
    8255  ++nextAlloc2ndIndex;
    8256  }
    8257 
    8258  // Found non-null allocation.
    8259  if(nextAlloc2ndIndex < suballoc2ndCount)
    8260  {
    8261  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8262 
    8263  // 1. Process free space before this allocation.
    8264  if(lastOffset < suballoc.offset)
    8265  {
    8266  // There is free space from lastOffset to suballoc.offset.
    8267  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8268  inoutStats.unusedSize += unusedRangeSize;
    8269  ++inoutStats.unusedRangeCount;
    8270  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8271  }
    8272 
    8273  // 2. Process this allocation.
    8274  // There is allocation with suballoc.offset, suballoc.size.
    8275  ++inoutStats.allocationCount;
    8276 
    8277  // 3. Prepare for next iteration.
    8278  lastOffset = suballoc.offset + suballoc.size;
    8279  ++nextAlloc2ndIndex;
    8280  }
    8281  // We are at the end.
    8282  else
    8283  {
    8284  if(lastOffset < freeSpace2ndTo1stEnd)
    8285  {
    8286  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8287  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8288  inoutStats.unusedSize += unusedRangeSize;
    8289  ++inoutStats.unusedRangeCount;
    8290  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8291  }
    8292 
    8293  // End of loop.
    8294  lastOffset = freeSpace2ndTo1stEnd;
    8295  }
    8296  }
    8297  }
    8298 
    8299  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8300  const VkDeviceSize freeSpace1stTo2ndEnd =
    8301  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8302  while(lastOffset < freeSpace1stTo2ndEnd)
    8303  {
    8304  // Find next non-null allocation or move nextAllocIndex to the end.
    8305  while(nextAlloc1stIndex < suballoc1stCount &&
    8306  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8307  {
    8308  ++nextAlloc1stIndex;
    8309  }
    8310 
    8311  // Found non-null allocation.
    8312  if(nextAlloc1stIndex < suballoc1stCount)
    8313  {
    8314  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8315 
    8316  // 1. Process free space before this allocation.
    8317  if(lastOffset < suballoc.offset)
    8318  {
    8319  // There is free space from lastOffset to suballoc.offset.
    8320  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8321  inoutStats.unusedSize += unusedRangeSize;
    8322  ++inoutStats.unusedRangeCount;
    8323  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8324  }
    8325 
    8326  // 2. Process this allocation.
    8327  // There is allocation with suballoc.offset, suballoc.size.
    8328  ++inoutStats.allocationCount;
    8329 
    8330  // 3. Prepare for next iteration.
    8331  lastOffset = suballoc.offset + suballoc.size;
    8332  ++nextAlloc1stIndex;
    8333  }
    8334  // We are at the end.
    8335  else
    8336  {
    8337  if(lastOffset < freeSpace1stTo2ndEnd)
    8338  {
    8339  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8340  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8341  inoutStats.unusedSize += unusedRangeSize;
    8342  ++inoutStats.unusedRangeCount;
    8343  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8344  }
    8345 
    8346  // End of loop.
    8347  lastOffset = freeSpace1stTo2ndEnd;
    8348  }
    8349  }
    8350 
    8351  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8352  {
    8353  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8354  while(lastOffset < size)
    8355  {
    8356  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8357  while(nextAlloc2ndIndex != SIZE_MAX &&
    8358  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8359  {
    8360  --nextAlloc2ndIndex;
    8361  }
    8362 
    8363  // Found non-null allocation.
    8364  if(nextAlloc2ndIndex != SIZE_MAX)
    8365  {
    8366  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8367 
    8368  // 1. Process free space before this allocation.
    8369  if(lastOffset < suballoc.offset)
    8370  {
    8371  // There is free space from lastOffset to suballoc.offset.
    8372  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8373  inoutStats.unusedSize += unusedRangeSize;
    8374  ++inoutStats.unusedRangeCount;
    8375  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8376  }
    8377 
    8378  // 2. Process this allocation.
    8379  // There is allocation with suballoc.offset, suballoc.size.
    8380  ++inoutStats.allocationCount;
    8381 
    8382  // 3. Prepare for next iteration.
    8383  lastOffset = suballoc.offset + suballoc.size;
    8384  --nextAlloc2ndIndex;
    8385  }
    8386  // We are at the end.
    8387  else
    8388  {
    8389  if(lastOffset < size)
    8390  {
    8391  // There is free space from lastOffset to size.
    8392  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8393  inoutStats.unusedSize += unusedRangeSize;
    8394  ++inoutStats.unusedRangeCount;
    8395  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8396  }
    8397 
    8398  // End of loop.
    8399  lastOffset = size;
    8400  }
    8401  }
    8402  }
    8403 }
    8404 
    8405 #if VMA_STATS_STRING_ENABLED
    8406 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8407 {
    8408  const VkDeviceSize size = GetSize();
    8409  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8410  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8411  const size_t suballoc1stCount = suballocations1st.size();
    8412  const size_t suballoc2ndCount = suballocations2nd.size();
    8413 
    8414  // FIRST PASS
    8415 
    8416  size_t unusedRangeCount = 0;
    8417  VkDeviceSize usedBytes = 0;
    8418 
    8419  VkDeviceSize lastOffset = 0;
    8420 
    8421  size_t alloc2ndCount = 0;
    8422  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8423  {
    8424  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8425  size_t nextAlloc2ndIndex = 0;
    8426  while(lastOffset < freeSpace2ndTo1stEnd)
    8427  {
    8428  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8429  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8430  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8431  {
    8432  ++nextAlloc2ndIndex;
    8433  }
    8434 
    8435  // Found non-null allocation.
    8436  if(nextAlloc2ndIndex < suballoc2ndCount)
    8437  {
    8438  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8439 
    8440  // 1. Process free space before this allocation.
    8441  if(lastOffset < suballoc.offset)
    8442  {
    8443  // There is free space from lastOffset to suballoc.offset.
    8444  ++unusedRangeCount;
    8445  }
    8446 
    8447  // 2. Process this allocation.
    8448  // There is allocation with suballoc.offset, suballoc.size.
    8449  ++alloc2ndCount;
    8450  usedBytes += suballoc.size;
    8451 
    8452  // 3. Prepare for next iteration.
    8453  lastOffset = suballoc.offset + suballoc.size;
    8454  ++nextAlloc2ndIndex;
    8455  }
    8456  // We are at the end.
    8457  else
    8458  {
    8459  if(lastOffset < freeSpace2ndTo1stEnd)
    8460  {
    8461  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8462  ++unusedRangeCount;
    8463  }
    8464 
    8465  // End of loop.
    8466  lastOffset = freeSpace2ndTo1stEnd;
    8467  }
    8468  }
    8469  }
    8470 
    8471  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8472  size_t alloc1stCount = 0;
    8473  const VkDeviceSize freeSpace1stTo2ndEnd =
    8474  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8475  while(lastOffset < freeSpace1stTo2ndEnd)
    8476  {
    8477  // Find next non-null allocation or move nextAllocIndex to the end.
    8478  while(nextAlloc1stIndex < suballoc1stCount &&
    8479  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8480  {
    8481  ++nextAlloc1stIndex;
    8482  }
    8483 
    8484  // Found non-null allocation.
    8485  if(nextAlloc1stIndex < suballoc1stCount)
    8486  {
    8487  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8488 
    8489  // 1. Process free space before this allocation.
    8490  if(lastOffset < suballoc.offset)
    8491  {
    8492  // There is free space from lastOffset to suballoc.offset.
    8493  ++unusedRangeCount;
    8494  }
    8495 
    8496  // 2. Process this allocation.
    8497  // There is allocation with suballoc.offset, suballoc.size.
    8498  ++alloc1stCount;
    8499  usedBytes += suballoc.size;
    8500 
    8501  // 3. Prepare for next iteration.
    8502  lastOffset = suballoc.offset + suballoc.size;
    8503  ++nextAlloc1stIndex;
    8504  }
    8505  // We are at the end.
    8506  else
    8507  {
    8508  if(lastOffset < size)
    8509  {
    8510  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8511  ++unusedRangeCount;
    8512  }
    8513 
    8514  // End of loop.
    8515  lastOffset = freeSpace1stTo2ndEnd;
    8516  }
    8517  }
    8518 
    8519  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8520  {
    8521  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8522  while(lastOffset < size)
    8523  {
    8524  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8525  while(nextAlloc2ndIndex != SIZE_MAX &&
    8526  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8527  {
    8528  --nextAlloc2ndIndex;
    8529  }
    8530 
    8531  // Found non-null allocation.
    8532  if(nextAlloc2ndIndex != SIZE_MAX)
    8533  {
    8534  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8535 
    8536  // 1. Process free space before this allocation.
    8537  if(lastOffset < suballoc.offset)
    8538  {
    8539  // There is free space from lastOffset to suballoc.offset.
    8540  ++unusedRangeCount;
    8541  }
    8542 
    8543  // 2. Process this allocation.
    8544  // There is allocation with suballoc.offset, suballoc.size.
    8545  ++alloc2ndCount;
    8546  usedBytes += suballoc.size;
    8547 
    8548  // 3. Prepare for next iteration.
    8549  lastOffset = suballoc.offset + suballoc.size;
    8550  --nextAlloc2ndIndex;
    8551  }
    8552  // We are at the end.
    8553  else
    8554  {
    8555  if(lastOffset < size)
    8556  {
    8557  // There is free space from lastOffset to size.
    8558  ++unusedRangeCount;
    8559  }
    8560 
    8561  // End of loop.
    8562  lastOffset = size;
    8563  }
    8564  }
    8565  }
    8566 
    8567  const VkDeviceSize unusedBytes = size - usedBytes;
    8568  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8569 
    8570  // SECOND PASS
    8571  lastOffset = 0;
    8572 
    8573  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8574  {
    8575  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8576  size_t nextAlloc2ndIndex = 0;
    8577  while(lastOffset < freeSpace2ndTo1stEnd)
    8578  {
    8579  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8580  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8581  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8582  {
    8583  ++nextAlloc2ndIndex;
    8584  }
    8585 
    8586  // Found non-null allocation.
    8587  if(nextAlloc2ndIndex < suballoc2ndCount)
    8588  {
    8589  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8590 
    8591  // 1. Process free space before this allocation.
    8592  if(lastOffset < suballoc.offset)
    8593  {
    8594  // There is free space from lastOffset to suballoc.offset.
    8595  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8596  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8597  }
    8598 
    8599  // 2. Process this allocation.
    8600  // There is allocation with suballoc.offset, suballoc.size.
    8601  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8602 
    8603  // 3. Prepare for next iteration.
    8604  lastOffset = suballoc.offset + suballoc.size;
    8605  ++nextAlloc2ndIndex;
    8606  }
    8607  // We are at the end.
    8608  else
    8609  {
    8610  if(lastOffset < freeSpace2ndTo1stEnd)
    8611  {
    8612  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8613  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8614  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8615  }
    8616 
    8617  // End of loop.
    8618  lastOffset = freeSpace2ndTo1stEnd;
    8619  }
    8620  }
    8621  }
    8622 
    8623  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8624  while(lastOffset < freeSpace1stTo2ndEnd)
    8625  {
    8626  // Find next non-null allocation or move nextAllocIndex to the end.
    8627  while(nextAlloc1stIndex < suballoc1stCount &&
    8628  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8629  {
    8630  ++nextAlloc1stIndex;
    8631  }
    8632 
    8633  // Found non-null allocation.
    8634  if(nextAlloc1stIndex < suballoc1stCount)
    8635  {
    8636  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8637 
    8638  // 1. Process free space before this allocation.
    8639  if(lastOffset < suballoc.offset)
    8640  {
    8641  // There is free space from lastOffset to suballoc.offset.
    8642  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8643  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8644  }
    8645 
    8646  // 2. Process this allocation.
    8647  // There is allocation with suballoc.offset, suballoc.size.
    8648  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8649 
    8650  // 3. Prepare for next iteration.
    8651  lastOffset = suballoc.offset + suballoc.size;
    8652  ++nextAlloc1stIndex;
    8653  }
    8654  // We are at the end.
    8655  else
    8656  {
    8657  if(lastOffset < freeSpace1stTo2ndEnd)
    8658  {
    8659  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8660  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8661  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8662  }
    8663 
    8664  // End of loop.
    8665  lastOffset = freeSpace1stTo2ndEnd;
    8666  }
    8667  }
    8668 
    8669  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8670  {
    8671  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8672  while(lastOffset < size)
    8673  {
    8674  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8675  while(nextAlloc2ndIndex != SIZE_MAX &&
    8676  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8677  {
    8678  --nextAlloc2ndIndex;
    8679  }
    8680 
    8681  // Found non-null allocation.
    8682  if(nextAlloc2ndIndex != SIZE_MAX)
    8683  {
    8684  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8685 
    8686  // 1. Process free space before this allocation.
    8687  if(lastOffset < suballoc.offset)
    8688  {
    8689  // There is free space from lastOffset to suballoc.offset.
    8690  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8691  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8692  }
    8693 
    8694  // 2. Process this allocation.
    8695  // There is allocation with suballoc.offset, suballoc.size.
    8696  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8697 
    8698  // 3. Prepare for next iteration.
    8699  lastOffset = suballoc.offset + suballoc.size;
    8700  --nextAlloc2ndIndex;
    8701  }
    8702  // We are at the end.
    8703  else
    8704  {
    8705  if(lastOffset < size)
    8706  {
    8707  // There is free space from lastOffset to size.
    8708  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8709  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8710  }
    8711 
    8712  // End of loop.
    8713  lastOffset = size;
    8714  }
    8715  }
    8716  }
    8717 
    8718  PrintDetailedMap_End(json);
    8719 }
    8720 #endif // #if VMA_STATS_STRING_ENABLED
    8721 
    // Computes placement for a new allocation in this linear block without modifying
    // any state. On success fills *pAllocationRequest and returns true; on failure
    // returns false. Three placements are tried depending on mode:
    //  - upperAddress: downwards, below the top of the 2nd vector used as upper stack,
    //  - at the end of the 1st vector (bounded above by the upper stack, if any),
    //  - wrapped around at the start of the block, growing the 2nd vector as a ring
    //    buffer, optionally making old allocations lost (canMakeOtherLost).
    // NOTE(review): parameter 'strategy' is accepted but never read in this
    // implementation - confirm that is intentional.
    8722 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    8723  uint32_t currentFrameIndex,
    8724  uint32_t frameInUseCount,
    8725  VkDeviceSize bufferImageGranularity,
    8726  VkDeviceSize allocSize,
    8727  VkDeviceSize allocAlignment,
    8728  bool upperAddress,
    8729  VmaSuballocationType allocType,
    8730  bool canMakeOtherLost,
    8731  uint32_t strategy,
    8732  VmaAllocationRequest* pAllocationRequest)
    8733 {
    8734  VMA_ASSERT(allocSize > 0);
    8735  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8736  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    8737  VMA_HEAVY_ASSERT(Validate());
    8738 
    8739  const VkDeviceSize size = GetSize();
    8740  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8741  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8742 
    // Mode 1: Upper stack - allocate downwards from the end of the block.
    8743  if(upperAddress)
    8744  {
    8745  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8746  {
    8747  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    8748  return false;
    8749  }
    8750 
    8751  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    8752  if(allocSize > size)
    8753  {
    8754  return false;
    8755  }
    8756  VkDeviceSize resultBaseOffset = size - allocSize;
    8757  if(!suballocations2nd.empty())
    8758  {
    8759  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8760  resultBaseOffset = lastSuballoc.offset - allocSize;
    8761  if(allocSize > lastSuballoc.offset)
    8762  {
    8763  return false;
    8764  }
    8765  }
    8766 
    8767  // Start from offset equal to end of free space.
    8768  VkDeviceSize resultOffset = resultBaseOffset;
    8769 
    8770  // Apply VMA_DEBUG_MARGIN at the end.
    8771  if(VMA_DEBUG_MARGIN > 0)
    8772  {
    8773  if(resultOffset < VMA_DEBUG_MARGIN)
    8774  {
    8775  return false;
    8776  }
    8777  resultOffset -= VMA_DEBUG_MARGIN;
    8778  }
    8779 
    // Growing downwards, so the offset is aligned DOWN here (up in the other modes).
    8780  // Apply alignment.
    8781  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    8782 
    8783  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    8784  // Make bigger alignment if necessary.
    8785  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8786  {
    8787  bool bufferImageGranularityConflict = false;
    8788  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8789  {
    8790  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8791  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8792  {
    8793  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    8794  {
    8795  bufferImageGranularityConflict = true;
    8796  break;
    8797  }
    8798  }
    8799  else
    8800  // Already on previous page.
    8801  break;
    8802  }
    8803  if(bufferImageGranularityConflict)
    8804  {
    8805  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    8806  }
    8807  }
    8808 
    8809  // There is enough free space.
    8810  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    8811  suballocations1st.back().offset + suballocations1st.back().size :
    8812  0;
    8813  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    8814  {
    8815  // Check previous suballocations for BufferImageGranularity conflicts.
    8816  // If conflict exists, allocation cannot be made here.
    8817  if(bufferImageGranularity > 1)
    8818  {
    8819  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8820  {
    8821  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8822  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8823  {
    8824  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    8825  {
    8826  return false;
    8827  }
    8828  }
    8829  else
    8830  {
    8831  // Already on next page.
    8832  break;
    8833  }
    8834  }
    8835  }
    8836 
    8837  // All tests passed: Success.
    8838  pAllocationRequest->offset = resultOffset;
    8839  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    8840  pAllocationRequest->sumItemSize = 0;
    8841  // pAllocationRequest->item unused.
    8842  pAllocationRequest->itemsToMakeLostCount = 0;
    8843  return true;
    8844  }
    8845  }
    8846  else // !upperAddress
    8847  {
    // Mode 2: Try appending at the end of the 1st vector (bounded by the upper
    // stack bottom in DOUBLE_STACK mode, or by the block end otherwise).
    8848  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8849  {
    8850  // Try to allocate at the end of 1st vector.
    8851 
    8852  VkDeviceSize resultBaseOffset = 0;
    8853  if(!suballocations1st.empty())
    8854  {
    8855  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    8856  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8857  }
    8858 
    8859  // Start from offset equal to beginning of free space.
    8860  VkDeviceSize resultOffset = resultBaseOffset;
    8861 
    8862  // Apply VMA_DEBUG_MARGIN at the beginning.
    8863  if(VMA_DEBUG_MARGIN > 0)
    8864  {
    8865  resultOffset += VMA_DEBUG_MARGIN;
    8866  }
    8867 
    8868  // Apply alignment.
    8869  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8870 
    8871  // Check previous suballocations for BufferImageGranularity conflicts.
    8872  // Make bigger alignment if necessary.
    8873  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    8874  {
    8875  bool bufferImageGranularityConflict = false;
    8876  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8877  {
    8878  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8879  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8880  {
    8881  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8882  {
    8883  bufferImageGranularityConflict = true;
    8884  break;
    8885  }
    8886  }
    8887  else
    8888  // Already on previous page.
    8889  break;
    8890  }
    8891  if(bufferImageGranularityConflict)
    8892  {
    8893  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8894  }
    8895  }
    8896 
    8897  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    8898  suballocations2nd.back().offset : size;
    8899 
    8900  // There is enough free space at the end after alignment.
    8901  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    8902  {
    8903  // Check next suballocations for BufferImageGranularity conflicts.
    8904  // If conflict exists, allocation cannot be made here.
    8905  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8906  {
    8907  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8908  {
    8909  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8910  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8911  {
    8912  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8913  {
    8914  return false;
    8915  }
    8916  }
    8917  else
    8918  {
    8919  // Already on previous page.
    8920  break;
    8921  }
    8922  }
    8923  }
    8924 
    8925  // All tests passed: Success.
    8926  pAllocationRequest->offset = resultOffset;
    8927  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    8928  pAllocationRequest->sumItemSize = 0;
    8929  // pAllocationRequest->item unused.
    8930  pAllocationRequest->itemsToMakeLostCount = 0;
    8931  return true;
    8932  }
    8933  }
    8934 
    // Mode 3: End of 1st vector did not fit - wrap around and grow the 2nd vector
    // as a ring buffer, with the first live item of the 1st vector as the limit.
    8935  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    8936  // beginning of 1st vector as the end of free space.
    8937  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8938  {
    8939  VMA_ASSERT(!suballocations1st.empty());
    8940 
    8941  VkDeviceSize resultBaseOffset = 0;
    8942  if(!suballocations2nd.empty())
    8943  {
    8944  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8945  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8946  }
    8947 
    8948  // Start from offset equal to beginning of free space.
    8949  VkDeviceSize resultOffset = resultBaseOffset;
    8950 
    8951  // Apply VMA_DEBUG_MARGIN at the beginning.
    8952  if(VMA_DEBUG_MARGIN > 0)
    8953  {
    8954  resultOffset += VMA_DEBUG_MARGIN;
    8955  }
    8956 
    8957  // Apply alignment.
    8958  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8959 
    8960  // Check previous suballocations for BufferImageGranularity conflicts.
    8961  // Make bigger alignment if necessary.
    8962  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8963  {
    8964  bool bufferImageGranularityConflict = false;
    8965  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    8966  {
    8967  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    8968  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8969  {
    8970  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8971  {
    8972  bufferImageGranularityConflict = true;
    8973  break;
    8974  }
    8975  }
    8976  else
    8977  // Already on previous page.
    8978  break;
    8979  }
    8980  if(bufferImageGranularityConflict)
    8981  {
    8982  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8983  }
    8984  }
    8985 
    8986  pAllocationRequest->itemsToMakeLostCount = 0;
    8987  pAllocationRequest->sumItemSize = 0;
    8988  size_t index1st = m_1stNullItemsBeginCount;
    8989 
    // Count the colliding allocations at the start of the 1st vector that would
    // have to be made lost for the new allocation to fit; fail if any cannot.
    8990  if(canMakeOtherLost)
    8991  {
    8992  while(index1st < suballocations1st.size() &&
    8993  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    8994  {
    8995  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    8996  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8997  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    8998  {
    8999  // No problem.
    9000  }
    9001  else
    9002  {
    9003  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9004  if(suballoc.hAllocation->CanBecomeLost() &&
    9005  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9006  {
    9007  ++pAllocationRequest->itemsToMakeLostCount;
    9008  pAllocationRequest->sumItemSize += suballoc.size;
    9009  }
    9010  else
    9011  {
    9012  return false;
    9013  }
    9014  }
    9015  ++index1st;
    9016  }
    9017 
    9018  // Check next suballocations for BufferImageGranularity conflicts.
    9019  // If conflict exists, we must mark more allocations lost or fail.
    9020  if(bufferImageGranularity > 1)
    9021  {
    9022  while(index1st < suballocations1st.size())
    9023  {
    9024  const VmaSuballocation& suballoc = suballocations1st[index1st];
    9025  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    9026  {
    9027  if(suballoc.hAllocation != VK_NULL_HANDLE)
    9028  {
    9029  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    9030  if(suballoc.hAllocation->CanBecomeLost() &&
    9031  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9032  {
    9033  ++pAllocationRequest->itemsToMakeLostCount;
    9034  pAllocationRequest->sumItemSize += suballoc.size;
    9035  }
    9036  else
    9037  {
    9038  return false;
    9039  }
    9040  }
    9041  }
    9042  else
    9043  {
    9044  // Already on next page.
    9045  break;
    9046  }
    9047  ++index1st;
    9048  }
    9049  }
    9050  }
    9051 
    // NOTE(review): the first branch compares with strict '<' against the block
    // size while the second uses '<=' against the next offset - confirm this
    // asymmetry is intentional.
    9052  // There is enough free space at the end after alignment.
    9053  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
    9054  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    9055  {
    9056  // Check next suballocations for BufferImageGranularity conflicts.
    9057  // If conflict exists, allocation cannot be made here.
    9058  if(bufferImageGranularity > 1)
    9059  {
    9060  for(size_t nextSuballocIndex = index1st;
    9061  nextSuballocIndex < suballocations1st.size();
    9062  nextSuballocIndex++)
    9063  {
    9064  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    9065  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9066  {
    9067  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9068  {
    9069  return false;
    9070  }
    9071  }
    9072  else
    9073  {
    9074  // Already on next page.
    9075  break;
    9076  }
    9077  }
    9078  }
    9079 
    9080  // All tests passed: Success.
    9081  pAllocationRequest->offset = resultOffset;
    9082  pAllocationRequest->sumFreeSize =
    9083  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    9084  - resultBaseOffset
    9085  - pAllocationRequest->sumItemSize;
    9086  // pAllocationRequest->item unused.
    9087  return true;
    9088  }
    9089  }
    9090  }
    9091 
    9092  return false;
    9093 }
    9094 
    9095 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    9096  uint32_t currentFrameIndex,
    9097  uint32_t frameInUseCount,
    9098  VmaAllocationRequest* pAllocationRequest)
    9099 {
    9100  if(pAllocationRequest->itemsToMakeLostCount == 0)
    9101  {
    9102  return true;
    9103  }
    9104 
    9105  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    9106 
    9107  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9108  size_t index1st = m_1stNullItemsBeginCount;
    9109  size_t madeLostCount = 0;
    9110  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    9111  {
    9112  VMA_ASSERT(index1st < suballocations1st.size());
    9113  VmaSuballocation& suballoc = suballocations1st[index1st];
    9114  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9115  {
    9116  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9117  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    9118  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9119  {
    9120  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9121  suballoc.hAllocation = VK_NULL_HANDLE;
    9122  m_SumFreeSize += suballoc.size;
    9123  ++m_1stNullItemsMiddleCount;
    9124  ++madeLostCount;
    9125  }
    9126  else
    9127  {
    9128  return false;
    9129  }
    9130  }
    9131  ++index1st;
    9132  }
    9133 
    9134  CleanupAfterFree();
    9135  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
    9136 
    9137  return true;
    9138 }
    9139 
    9140 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9141 {
    9142  uint32_t lostAllocationCount = 0;
    9143 
    9144  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9145  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9146  {
    9147  VmaSuballocation& suballoc = suballocations1st[i];
    9148  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9149  suballoc.hAllocation->CanBecomeLost() &&
    9150  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9151  {
    9152  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9153  suballoc.hAllocation = VK_NULL_HANDLE;
    9154  ++m_1stNullItemsMiddleCount;
    9155  m_SumFreeSize += suballoc.size;
    9156  ++lostAllocationCount;
    9157  }
    9158  }
    9159 
    9160  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9161  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9162  {
    9163  VmaSuballocation& suballoc = suballocations2nd[i];
    9164  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9165  suballoc.hAllocation->CanBecomeLost() &&
    9166  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9167  {
    9168  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9169  suballoc.hAllocation = VK_NULL_HANDLE;
    9170  ++m_2ndNullItemsCount;
    9171  ++lostAllocationCount;
    9172  }
    9173  }
    9174 
    9175  if(lostAllocationCount)
    9176  {
    9177  CleanupAfterFree();
    9178  }
    9179 
    9180  return lostAllocationCount;
    9181 }
    9182 
    9183 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9184 {
    9185  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9186  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9187  {
    9188  const VmaSuballocation& suballoc = suballocations1st[i];
    9189  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9190  {
    9191  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9192  {
    9193  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9194  return VK_ERROR_VALIDATION_FAILED_EXT;
    9195  }
    9196  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9197  {
    9198  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9199  return VK_ERROR_VALIDATION_FAILED_EXT;
    9200  }
    9201  }
    9202  }
    9203 
    9204  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9205  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9206  {
    9207  const VmaSuballocation& suballoc = suballocations2nd[i];
    9208  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9209  {
    9210  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9211  {
    9212  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9213  return VK_ERROR_VALIDATION_FAILED_EXT;
    9214  }
    9215  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9216  {
    9217  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9218  return VK_ERROR_VALIDATION_FAILED_EXT;
    9219  }
    9220  }
    9221  }
    9222 
    9223  return VK_SUCCESS;
    9224 }
    9225 
    9226 void VmaBlockMetadata_Linear::Alloc(
    9227  const VmaAllocationRequest& request,
    9228  VmaSuballocationType type,
    9229  VkDeviceSize allocSize,
    9230  bool upperAddress,
    9231  VmaAllocation hAllocation)
    9232 {
    9233  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    9234 
    9235  if(upperAddress)
    9236  {
    9237  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    9238  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    9239  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9240  suballocations2nd.push_back(newSuballoc);
    9241  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    9242  }
    9243  else
    9244  {
    9245  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9246 
    9247  // First allocation.
    9248  if(suballocations1st.empty())
    9249  {
    9250  suballocations1st.push_back(newSuballoc);
    9251  }
    9252  else
    9253  {
    9254  // New allocation at the end of 1st vector.
    9255  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
    9256  {
    9257  // Check if it fits before the end of the block.
    9258  VMA_ASSERT(request.offset + allocSize <= GetSize());
    9259  suballocations1st.push_back(newSuballoc);
    9260  }
    9261  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    9262  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
    9263  {
    9264  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9265 
    9266  switch(m_2ndVectorMode)
    9267  {
    9268  case SECOND_VECTOR_EMPTY:
    9269  // First allocation from second part ring buffer.
    9270  VMA_ASSERT(suballocations2nd.empty());
    9271  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    9272  break;
    9273  case SECOND_VECTOR_RING_BUFFER:
    9274  // 2-part ring buffer is already started.
    9275  VMA_ASSERT(!suballocations2nd.empty());
    9276  break;
    9277  case SECOND_VECTOR_DOUBLE_STACK:
    9278  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    9279  break;
    9280  default:
    9281  VMA_ASSERT(0);
    9282  }
    9283 
    9284  suballocations2nd.push_back(newSuballoc);
    9285  }
    9286  else
    9287  {
    9288  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    9289  }
    9290  }
    9291  }
    9292 
    9293  m_SumFreeSize -= newSuballoc.size;
    9294 }
    9295 
    9296 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9297 {
    9298  FreeAtOffset(allocation->GetOffset());
    9299 }
    9300 
    9301 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
    9302 {
    9303  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9304  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9305 
    9306  if(!suballocations1st.empty())
    9307  {
    9308  // First allocation: Mark it as next empty at the beginning.
    9309  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    9310  if(firstSuballoc.offset == offset)
    9311  {
    9312  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9313  firstSuballoc.hAllocation = VK_NULL_HANDLE;
    9314  m_SumFreeSize += firstSuballoc.size;
    9315  ++m_1stNullItemsBeginCount;
    9316  CleanupAfterFree();
    9317  return;
    9318  }
    9319  }
    9320 
    9321  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    9322  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
    9323  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9324  {
    9325  VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9326  if(lastSuballoc.offset == offset)
    9327  {
    9328  m_SumFreeSize += lastSuballoc.size;
    9329  suballocations2nd.pop_back();
    9330  CleanupAfterFree();
    9331  return;
    9332  }
    9333  }
    9334  // Last allocation in 1st vector.
    9335  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    9336  {
    9337  VmaSuballocation& lastSuballoc = suballocations1st.back();
    9338  if(lastSuballoc.offset == offset)
    9339  {
    9340  m_SumFreeSize += lastSuballoc.size;
    9341  suballocations1st.pop_back();
    9342  CleanupAfterFree();
    9343  return;
    9344  }
    9345  }
    9346 
    9347  // Item from the middle of 1st vector.
    9348  {
    9349  VmaSuballocation refSuballoc;
    9350  refSuballoc.offset = offset;
    9351  // Rest of members stays uninitialized intentionally for better performance.
    9352  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
    9353  suballocations1st.begin() + m_1stNullItemsBeginCount,
    9354  suballocations1st.end(),
    9355  refSuballoc);
    9356  if(it != suballocations1st.end())
    9357  {
    9358  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    9359  it->hAllocation = VK_NULL_HANDLE;
    9360  ++m_1stNullItemsMiddleCount;
    9361  m_SumFreeSize += it->size;
    9362  CleanupAfterFree();
    9363  return;
    9364  }
    9365  }
    9366 
    9367  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    9368  {
    9369  // Item from the middle of 2nd vector.
    9370  VmaSuballocation refSuballoc;
    9371  refSuballoc.offset = offset;
    9372  // Rest of members stays uninitialized intentionally for better performance.
    9373  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
    9374  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
    9375  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
    9376  if(it != suballocations2nd.end())
    9377  {
    9378  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    9379  it->hAllocation = VK_NULL_HANDLE;
    9380  ++m_2ndNullItemsCount;
    9381  m_SumFreeSize += it->size;
    9382  CleanupAfterFree();
    9383  return;
    9384  }
    9385  }
    9386 
    9387  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
    9388 }
    9389 
    9390 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9391 {
    9392  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9393  const size_t suballocCount = AccessSuballocations1st().size();
    9394  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9395 }
    9396 
    // Housekeeping run after every free / make-lost operation: trims null items
    // from the edges of both vectors, optionally compacts the 1st vector, and
    // collapses the block back to its simplest representation (possibly swapping
    // the roles of the 1st and 2nd vectors) when one of them becomes empty.
    9397 void VmaBlockMetadata_Linear::CleanupAfterFree()
    9398 {
    9399  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9400  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9401 
    // Fast path: no live allocations at all - reset everything to initial state.
    9402  if(IsEmpty())
    9403  {
    9404  suballocations1st.clear();
    9405  suballocations2nd.clear();
    9406  m_1stNullItemsBeginCount = 0;
    9407  m_1stNullItemsMiddleCount = 0;
    9408  m_2ndNullItemsCount = 0;
    9409  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    9410  }
    9411  else
    9412  {
    9413  const size_t suballoc1stCount = suballocations1st.size();
    9414  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9415  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
    9416 
    9417  // Find more null items at the beginning of 1st vector.
    9418  while(m_1stNullItemsBeginCount < suballoc1stCount &&
    9419  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    9420  {
    9421  ++m_1stNullItemsBeginCount;
    9422  --m_1stNullItemsMiddleCount;
    9423  }
    9424 
    9425  // Find more null items at the end of 1st vector.
    9426  while(m_1stNullItemsMiddleCount > 0 &&
    9427  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
    9428  {
    9429  --m_1stNullItemsMiddleCount;
    9430  suballocations1st.pop_back();
    9431  }
    9432 
    9433  // Find more null items at the end of 2nd vector.
    9434  while(m_2ndNullItemsCount > 0 &&
    9435  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
    9436  {
    9437  --m_2ndNullItemsCount;
    9438  suballocations2nd.pop_back();
    9439  }
    9440 
    // Compaction: slide all live items of the 1st vector to the front and drop
    // every null item (counts computed before the trimming above).
    9441  if(ShouldCompact1st())
    9442  {
    9443  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
    9444  size_t srcIndex = m_1stNullItemsBeginCount;
    9445  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
    9446  {
    9447  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
    9448  {
    9449  ++srcIndex;
    9450  }
    9451  if(dstIndex != srcIndex)
    9452  {
    9453  suballocations1st[dstIndex] = suballocations1st[srcIndex];
    9454  }
    9455  ++srcIndex;
    9456  }
    9457  suballocations1st.resize(nonNullItemCount);
    9458  m_1stNullItemsBeginCount = 0;
    9459  m_1stNullItemsMiddleCount = 0;
    9460  }
    9461 
    9462  // 2nd vector became empty.
    9463  if(suballocations2nd.empty())
    9464  {
    9465  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    9466  }
    9467 
    9468  // 1st vector became empty.
    9469  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
    9470  {
    9471  suballocations1st.clear();
    9472  m_1stNullItemsBeginCount = 0;
    9473 
    9474  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9475  {
    9476  // Swap 1st with 2nd. Now 2nd is empty.
    // Subtle: from here on, m_1stNullItemsBeginCount is deliberately used to
    // index suballocations2nd, because the roles of the two vectors are about
    // to be exchanged via 'm_1stVectorIndex ^= 1' below.
    9477  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    9478  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
    9479  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
    9480  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    9481  {
    9482  ++m_1stNullItemsBeginCount;
    9483  --m_1stNullItemsMiddleCount;
    9484  }
    9485  m_2ndNullItemsCount = 0;
    9486  m_1stVectorIndex ^= 1;
    9487  }
    9488  }
    9489  }
    9490 
    9491  VMA_HEAVY_ASSERT(Validate());
    9492 }
    9493 
    9494 
    9496 // class VmaBlockMetadata_Buddy
    9497 
    // Constructs an empty buddy-system metadata object. The tree is not built
    // here - Init() creates the root node; m_FreeCount starts at 1 for the
    // single free range the root will represent.
    9498 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    9499  VmaBlockMetadata(hAllocator),
    9500  m_Root(VMA_NULL),
    9501  m_AllocationCount(0),
    9502  m_FreeCount(1),
    9503  m_SumFreeSize(0)
    9504 {
    // Zero the whole free-list array (all front/back pointers become null).
    9505  memset(m_FreeList, 0, sizeof(m_FreeList));
    9506 }
    9507 
    // Destroys the buddy tree starting from the root.
    // NOTE(review): DeleteNode is defined elsewhere - presumably it frees the
    // node together with its children; confirm it handles a VMA_NULL root.
    9508 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    9509 {
    9510  DeleteNode(m_Root);
    9511 }
    9512 
    9513 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9514 {
    9515  VmaBlockMetadata::Init(size);
    9516 
    9517  m_UsableSize = VmaPrevPow2(size);
    9518  m_SumFreeSize = m_UsableSize;
    9519 
    9520  // Calculate m_LevelCount.
    9521  m_LevelCount = 1;
    9522  while(m_LevelCount < MAX_LEVELS &&
    9523  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9524  {
    9525  ++m_LevelCount;
    9526  }
    9527 
    9528  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9529  rootNode->offset = 0;
    9530  rootNode->type = Node::TYPE_FREE;
    9531  rootNode->parent = VMA_NULL;
    9532  rootNode->buddy = VMA_NULL;
    9533 
    9534  m_Root = rootNode;
    9535  AddToFreeListFront(0, rootNode);
    9536 }
    9537 
    9538 bool VmaBlockMetadata_Buddy::Validate() const
    9539 {
    9540  // Validate tree.
    9541  ValidationContext ctx;
    9542  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    9543  {
    9544  VMA_VALIDATE(false && "ValidateNode failed.");
    9545  }
    9546  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    9547  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    9548 
    9549  // Validate free node lists.
    9550  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9551  {
    9552  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
    9553  m_FreeList[level].front->free.prev == VMA_NULL);
    9554 
    9555  for(Node* node = m_FreeList[level].front;
    9556  node != VMA_NULL;
    9557  node = node->free.next)
    9558  {
    9559  VMA_VALIDATE(node->type == Node::TYPE_FREE);
    9560 
    9561  if(node->free.next == VMA_NULL)
    9562  {
    9563  VMA_VALIDATE(m_FreeList[level].back == node);
    9564  }
    9565  else
    9566  {
    9567  VMA_VALIDATE(node->free.next->free.prev == node);
    9568  }
    9569  }
    9570  }
    9571 
    9572  // Validate that free lists ar higher levels are empty.
    9573  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    9574  {
    9575  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    9576  }
    9577 
    9578  return true;
    9579 }
    9580 
    9581 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9582 {
    9583  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9584  {
    9585  if(m_FreeList[level].front != VMA_NULL)
    9586  {
    9587  return LevelToNodeSize(level);
    9588  }
    9589  }
    9590  return 0;
    9591 }
    9592 
    9593 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9594 {
    9595  const VkDeviceSize unusableSize = GetUnusableSize();
    9596 
    9597  outInfo.blockCount = 1;
    9598 
    9599  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    9600  outInfo.usedBytes = outInfo.unusedBytes = 0;
    9601 
    9602  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    9603  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    9604  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    9605 
    9606  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    9607 
    9608  if(unusableSize > 0)
    9609  {
    9610  ++outInfo.unusedRangeCount;
    9611  outInfo.unusedBytes += unusableSize;
    9612  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    9613  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    9614  }
    9615 }
    9616 
    9617 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9618 {
    9619  const VkDeviceSize unusableSize = GetUnusableSize();
    9620 
    9621  inoutStats.size += GetSize();
    9622  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9623  inoutStats.allocationCount += m_AllocationCount;
    9624  inoutStats.unusedRangeCount += m_FreeCount;
    9625  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9626 
    9627  if(unusableSize > 0)
    9628  {
    9629  ++inoutStats.unusedRangeCount;
    9630  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9631  }
    9632 }
    9633 
    9634 #if VMA_STATS_STRING_ENABLED
    9635 
    9636 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    9637 {
    9638  // TODO optimize
    9639  VmaStatInfo stat;
    9640  CalcAllocationStatInfo(stat);
    9641 
    9642  PrintDetailedMap_Begin(
    9643  json,
    9644  stat.unusedBytes,
    9645  stat.allocationCount,
    9646  stat.unusedRangeCount);
    9647 
    9648  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    9649 
    9650  const VkDeviceSize unusableSize = GetUnusableSize();
    9651  if(unusableSize > 0)
    9652  {
    9653  PrintDetailedMap_UnusedRange(json,
    9654  m_UsableSize, // offset
    9655  unusableSize); // size
    9656  }
    9657 
    9658  PrintDetailedMap_End(json);
    9659 }
    9660 
    9661 #endif // #if VMA_STATS_STRING_ENABLED
    9662 
    9663 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    9664  uint32_t currentFrameIndex,
    9665  uint32_t frameInUseCount,
    9666  VkDeviceSize bufferImageGranularity,
    9667  VkDeviceSize allocSize,
    9668  VkDeviceSize allocAlignment,
    9669  bool upperAddress,
    9670  VmaSuballocationType allocType,
    9671  bool canMakeOtherLost,
    9672  uint32_t strategy,
    9673  VmaAllocationRequest* pAllocationRequest)
    9674 {
    9675  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    9676 
    9677  // Simple way to respect bufferImageGranularity. May be optimized some day.
    9678  // Whenever it might be an OPTIMAL image...
    9679  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    9680  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    9681  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    9682  {
    9683  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    9684  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    9685  }
    9686 
    9687  if(allocSize > m_UsableSize)
    9688  {
    9689  return false;
    9690  }
    9691 
    9692  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    9693  for(uint32_t level = targetLevel + 1; level--; )
    9694  {
    9695  for(Node* freeNode = m_FreeList[level].front;
    9696  freeNode != VMA_NULL;
    9697  freeNode = freeNode->free.next)
    9698  {
    9699  if(freeNode->offset % allocAlignment == 0)
    9700  {
    9701  pAllocationRequest->offset = freeNode->offset;
    9702  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    9703  pAllocationRequest->sumItemSize = 0;
    9704  pAllocationRequest->itemsToMakeLostCount = 0;
    9705  pAllocationRequest->customData = (void*)(uintptr_t)level;
    9706  return true;
    9707  }
    9708  }
    9709  }
    9710 
    9711  return false;
    9712 }
    9713 
    9714 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9715  uint32_t currentFrameIndex,
    9716  uint32_t frameInUseCount,
    9717  VmaAllocationRequest* pAllocationRequest)
    9718 {
    9719  /*
    9720  Lost allocations are not supported in buddy allocator at the moment.
    9721  Support might be added in the future.
    9722  */
    9723  return pAllocationRequest->itemsToMakeLostCount == 0;
    9724 }
    9725 
    9726 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9727 {
    9728  /*
    9729  Lost allocations are not supported in buddy allocator at the moment.
    9730  Support might be added in the future.
    9731  */
    9732  return 0;
    9733 }
    9734 
// Commits an allocation previously prepared by CreateAllocationRequest():
// finds the chosen free node, splits it down to the level matching allocSize,
// and converts the final node into an allocation node holding hAllocation.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // request.customData carries the level at which the free node was found.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node at currLevel whose offset matches the request.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Right is pushed first so the left child ends up at the list front,
        // which the bottom of this loop relies on.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // One node was consumed, two were created: net +1 free node.
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fulfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    // Keep aggregate counters in sync (undone in FreeAtOffset()).
    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9809 
    9810 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9811 {
    9812  if(node->type == Node::TYPE_SPLIT)
    9813  {
    9814  DeleteNode(node->split.leftChild->buddy);
    9815  DeleteNode(node->split.leftChild);
    9816  }
    9817 
    9818  vma_delete(GetAllocationCallbacks(), node);
    9819 }
    9820 
    9821 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9822 {
    9823  VMA_VALIDATE(level < m_LevelCount);
    9824  VMA_VALIDATE(curr->parent == parent);
    9825  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9826  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9827  switch(curr->type)
    9828  {
    9829  case Node::TYPE_FREE:
    9830  // curr->free.prev, next are validated separately.
    9831  ctx.calculatedSumFreeSize += levelNodeSize;
    9832  ++ctx.calculatedFreeCount;
    9833  break;
    9834  case Node::TYPE_ALLOCATION:
    9835  ++ctx.calculatedAllocationCount;
    9836  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9837  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9838  break;
    9839  case Node::TYPE_SPLIT:
    9840  {
    9841  const uint32_t childrenLevel = level + 1;
    9842  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9843  const Node* const leftChild = curr->split.leftChild;
    9844  VMA_VALIDATE(leftChild != VMA_NULL);
    9845  VMA_VALIDATE(leftChild->offset == curr->offset);
    9846  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9847  {
    9848  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9849  }
    9850  const Node* const rightChild = leftChild->buddy;
    9851  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9852  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9853  {
    9854  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9855  }
    9856  }
    9857  break;
    9858  default:
    9859  return false;
    9860  }
    9861 
    9862  return true;
    9863 }
    9864 
    9865 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9866 {
    9867  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9868  uint32_t level = 0;
    9869  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9870  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9871  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9872  {
    9873  ++level;
    9874  currLevelNodeSize = nextLevelNodeSize;
    9875  nextLevelNodeSize = currLevelNodeSize >> 1;
    9876  }
    9877  return level;
    9878 }
    9879 
// Frees the allocation node that covers `offset`, then merges adjacent free
// buddies back into their parents as far up the tree as possible.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level by descending from the root, picking the child
    // whose half of the range contains `offset`.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            // Offset lies in the first half: descend into the left child.
            node = node->split.leftChild;
        }
        else
        {
            // Offset lies in the second half: descend into the right child.
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): alloc is dereferenced here even though the assert above
    // admits alloc == VK_NULL_HANDLE — verify no caller passes a null handle.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible: while this node's buddy is also free,
    // collapse both children back into their (now free) parent and ascend.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        // Two free children were replaced by one free parent: net -1.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9930 
    9931 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9932 {
    9933  switch(node->type)
    9934  {
    9935  case Node::TYPE_FREE:
    9936  ++outInfo.unusedRangeCount;
    9937  outInfo.unusedBytes += levelNodeSize;
    9938  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9939  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9940  break;
    9941  case Node::TYPE_ALLOCATION:
    9942  {
    9943  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9944  ++outInfo.allocationCount;
    9945  outInfo.usedBytes += allocSize;
    9946  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9947  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9948 
    9949  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9950  if(unusedRangeSize > 0)
    9951  {
    9952  ++outInfo.unusedRangeCount;
    9953  outInfo.unusedBytes += unusedRangeSize;
    9954  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9955  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9956  }
    9957  }
    9958  break;
    9959  case Node::TYPE_SPLIT:
    9960  {
    9961  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9962  const Node* const leftChild = node->split.leftChild;
    9963  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9964  const Node* const rightChild = leftChild->buddy;
    9965  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9966  }
    9967  break;
    9968  default:
    9969  VMA_ASSERT(0);
    9970  }
    9971 }
    9972 
    9973 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9974 {
    9975  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9976 
    9977  // List is empty.
    9978  Node* const frontNode = m_FreeList[level].front;
    9979  if(frontNode == VMA_NULL)
    9980  {
    9981  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9982  node->free.prev = node->free.next = VMA_NULL;
    9983  m_FreeList[level].front = m_FreeList[level].back = node;
    9984  }
    9985  else
    9986  {
    9987  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9988  node->free.prev = VMA_NULL;
    9989  node->free.next = frontNode;
    9990  frontNode->free.prev = node;
    9991  m_FreeList[level].front = node;
    9992  }
    9993 }
    9994 
    9995 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9996 {
    9997  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9998 
    9999  // It is at the front.
    10000  if(node->free.prev == VMA_NULL)
    10001  {
    10002  VMA_ASSERT(m_FreeList[level].front == node);
    10003  m_FreeList[level].front = node->free.next;
    10004  }
    10005  else
    10006  {
    10007  Node* const prevFreeNode = node->free.prev;
    10008  VMA_ASSERT(prevFreeNode->free.next == node);
    10009  prevFreeNode->free.next = node->free.next;
    10010  }
    10011 
    10012  // It is at the back.
    10013  if(node->free.next == VMA_NULL)
    10014  {
    10015  VMA_ASSERT(m_FreeList[level].back == node);
    10016  m_FreeList[level].back = node->free.prev;
    10017  }
    10018  else
    10019  {
    10020  Node* const nextFreeNode = node->free.next;
    10021  VMA_ASSERT(nextFreeNode->free.prev == node);
    10022  nextFreeNode->free.prev = node->free.prev;
    10023  }
    10024 }
    10025 
    10026 #if VMA_STATS_STRING_ENABLED
    10027 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    10028 {
    10029  switch(node->type)
    10030  {
    10031  case Node::TYPE_FREE:
    10032  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    10033  break;
    10034  case Node::TYPE_ALLOCATION:
    10035  {
    10036  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    10037  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10038  if(allocSize < levelNodeSize)
    10039  {
    10040  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    10041  }
    10042  }
    10043  break;
    10044  case Node::TYPE_SPLIT:
    10045  {
    10046  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10047  const Node* const leftChild = node->split.leftChild;
    10048  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    10049  const Node* const rightChild = leftChild->buddy;
    10050  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    10051  }
    10052  break;
    10053  default:
    10054  VMA_ASSERT(0);
    10055  }
    10056 }
    10057 #endif // #if VMA_STATS_STRING_ENABLED
    10058 
    10059 
    10061 // class VmaDeviceMemoryBlock
    10062 
// Constructs an empty, uninitialized block. Real setup happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX), // Invalid until Init() assigns a real index.
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0), // Reference count of outstanding Map() calls; see Map()/Unmap().
    m_pMappedData(VMA_NULL)
{
}
    10072 
    10073 void VmaDeviceMemoryBlock::Init(
    10074  VmaAllocator hAllocator,
    10075  uint32_t newMemoryTypeIndex,
    10076  VkDeviceMemory newMemory,
    10077  VkDeviceSize newSize,
    10078  uint32_t id,
    10079  uint32_t algorithm)
    10080 {
    10081  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10082 
    10083  m_MemoryTypeIndex = newMemoryTypeIndex;
    10084  m_Id = id;
    10085  m_hMemory = newMemory;
    10086 
    10087  switch(algorithm)
    10088  {
    10090  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10091  break;
    10093  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10094  break;
    10095  default:
    10096  VMA_ASSERT(0);
    10097  // Fall-through.
    10098  case 0:
    10099  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10100  }
    10101  m_pMetadata->Init(newSize);
    10102 }
    10103 
    10104 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    10105 {
    10106  // This is the most important assert in the entire library.
    10107  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    10108  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    10109 
    10110  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    10111  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    10112  m_hMemory = VK_NULL_HANDLE;
    10113 
    10114  vma_delete(allocator, m_pMetadata);
    10115  m_pMetadata = VMA_NULL;
    10116 }
    10117 
    10118 bool VmaDeviceMemoryBlock::Validate() const
    10119 {
    10120  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    10121  (m_pMetadata->GetSize() != 0));
    10122 
    10123  return m_pMetadata->Validate();
    10124 }
    10125 
    10126 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10127 {
    10128  void* pData = nullptr;
    10129  VkResult res = Map(hAllocator, 1, &pData);
    10130  if(res != VK_SUCCESS)
    10131  {
    10132  return res;
    10133  }
    10134 
    10135  res = m_pMetadata->CheckCorruption(pData);
    10136 
    10137  Unmap(hAllocator, 1);
    10138 
    10139  return res;
    10140 }
    10141 
    10142 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    10143 {
    10144  if(count == 0)
    10145  {
    10146  return VK_SUCCESS;
    10147  }
    10148 
    10149  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10150  if(m_MapCount != 0)
    10151  {
    10152  m_MapCount += count;
    10153  VMA_ASSERT(m_pMappedData != VMA_NULL);
    10154  if(ppData != VMA_NULL)
    10155  {
    10156  *ppData = m_pMappedData;
    10157  }
    10158  return VK_SUCCESS;
    10159  }
    10160  else
    10161  {
    10162  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    10163  hAllocator->m_hDevice,
    10164  m_hMemory,
    10165  0, // offset
    10166  VK_WHOLE_SIZE,
    10167  0, // flags
    10168  &m_pMappedData);
    10169  if(result == VK_SUCCESS)
    10170  {
    10171  if(ppData != VMA_NULL)
    10172  {
    10173  *ppData = m_pMappedData;
    10174  }
    10175  m_MapCount = count;
    10176  }
    10177  return result;
    10178  }
    10179 }
    10180 
    10181 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    10182 {
    10183  if(count == 0)
    10184  {
    10185  return;
    10186  }
    10187 
    10188  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10189  if(m_MapCount >= count)
    10190  {
    10191  m_MapCount -= count;
    10192  if(m_MapCount == 0)
    10193  {
    10194  m_pMappedData = VMA_NULL;
    10195  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10196  }
    10197  }
    10198  else
    10199  {
    10200  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10201  }
    10202 }
    10203 
    10204 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10205 {
    10206  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10207  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10208 
    10209  void* pData;
    10210  VkResult res = Map(hAllocator, 1, &pData);
    10211  if(res != VK_SUCCESS)
    10212  {
    10213  return res;
    10214  }
    10215 
    10216  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10217  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10218 
    10219  Unmap(hAllocator, 1);
    10220 
    10221  return VK_SUCCESS;
    10222 }
    10223 
    10224 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10225 {
    10226  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10227  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10228 
    10229  void* pData;
    10230  VkResult res = Map(hAllocator, 1, &pData);
    10231  if(res != VK_SUCCESS)
    10232  {
    10233  return res;
    10234  }
    10235 
    10236  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10237  {
    10238  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10239  }
    10240  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10241  {
    10242  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10243  }
    10244 
    10245  Unmap(hAllocator, 1);
    10246 
    10247  return VK_SUCCESS;
    10248 }
    10249 
    10250 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10251  const VmaAllocator hAllocator,
    10252  const VmaAllocation hAllocation,
    10253  VkBuffer hBuffer)
    10254 {
    10255  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10256  hAllocation->GetBlock() == this);
    10257  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10258  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10259  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10260  hAllocator->m_hDevice,
    10261  hBuffer,
    10262  m_hMemory,
    10263  hAllocation->GetOffset());
    10264 }
    10265 
    10266 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10267  const VmaAllocator hAllocator,
    10268  const VmaAllocation hAllocation,
    10269  VkImage hImage)
    10270 {
    10271  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10272  hAllocation->GetBlock() == this);
    10273  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10274  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10275  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10276  hAllocator->m_hDevice,
    10277  hImage,
    10278  m_hMemory,
    10279  hAllocation->GetOffset());
    10280 }
    10281 
    10282 static void InitStatInfo(VmaStatInfo& outInfo)
    10283 {
    10284  memset(&outInfo, 0, sizeof(outInfo));
    10285  outInfo.allocationSizeMin = UINT64_MAX;
    10286  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10287 }
    10288 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counters and byte totals accumulate; extremes combine via min/max.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    // Note: the *Avg fields are not combined here; they are derived later by
    // VmaPostprocessCalcStatInfo().
}
    10302 
    10303 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10304 {
    10305  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10306  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10307  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10308  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10309 }
    10310 
// Creates a custom pool: all real state lives in the embedded block vector.
// When createInfo.blockSize is 0, the heap-dependent preferredBlockSize is used.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10329 
// Intentionally empty: the embedded m_BlockVector destructor releases the blocks.
VmaPool_T::~VmaPool_T()
{
}
    10333 
    10334 #if VMA_STATS_STRING_ENABLED
    10335 
    10336 #endif // #if VMA_STATS_STRING_ENABLED
    10337 
// Stores the configuration of a vector of memory blocks of one memory type.
// All parameters are captured verbatim; no blocks are created here
// (see CreateMinBlocks()).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10365 
    10366 VmaBlockVector::~VmaBlockVector()
    10367 {
    10368  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10369 
    10370  for(size_t i = m_Blocks.size(); i--; )
    10371  {
    10372  m_Blocks[i]->Destroy(m_hAllocator);
    10373  vma_delete(m_hAllocator, m_Blocks[i]);
    10374  }
    10375 }
    10376 
    10377 VkResult VmaBlockVector::CreateMinBlocks()
    10378 {
    10379  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10380  {
    10381  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10382  if(res != VK_SUCCESS)
    10383  {
    10384  return res;
    10385  }
    10386  }
    10387  return VK_SUCCESS;
    10388 }
    10389 
    10390 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10391 {
    10392  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10393 
    10394  const size_t blockCount = m_Blocks.size();
    10395 
    10396  pStats->size = 0;
    10397  pStats->unusedSize = 0;
    10398  pStats->allocationCount = 0;
    10399  pStats->unusedRangeCount = 0;
    10400  pStats->unusedRangeSizeMax = 0;
    10401  pStats->blockCount = blockCount;
    10402 
    10403  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10404  {
    10405  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10406  VMA_ASSERT(pBlock);
    10407  VMA_HEAVY_ASSERT(pBlock->Validate());
    10408  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10409  }
    10410 }
    10411 
    10412 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10413 {
    10414  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10415  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10416  (VMA_DEBUG_MARGIN > 0) &&
    10417  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10418 }
    10419 
// Upper bound on retries within a single allocation attempt.
// NOTE(review): its use is outside this excerpt — presumably the make-lost
// retry loop in VmaBlockVector::Allocate; confirm against the full file.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10421 
    10422 VkResult VmaBlockVector::Allocate(
    10423  VmaPool hCurrentPool,
    10424  uint32_t currentFrameIndex,
    10425  VkDeviceSize size,
    10426  VkDeviceSize alignment,
    10427  const VmaAllocationCreateInfo& createInfo,
    10428  VmaSuballocationType suballocType,
    10429  VmaAllocation* pAllocation)
    10430 {
    10431  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10432  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10433  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10434  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10435  const bool canCreateNewBlock =
    10436  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10437  (m_Blocks.size() < m_MaxBlockCount);
    10438  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10439 
    10440  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10441  // Which in turn is available only when maxBlockCount = 1.
    10442  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10443  {
    10444  canMakeOtherLost = false;
    10445  }
    10446 
    10447  // Upper address can only be used with linear allocator and within single memory block.
    10448  if(isUpperAddress &&
    10449  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10450  {
    10451  return VK_ERROR_FEATURE_NOT_PRESENT;
    10452  }
    10453 
    10454  // Validate strategy.
    10455  switch(strategy)
    10456  {
    10457  case 0:
    10459  break;
    10463  break;
    10464  default:
    10465  return VK_ERROR_FEATURE_NOT_PRESENT;
    10466  }
    10467 
    10468  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10469  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10470  {
    10471  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10472  }
    10473 
    10474  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10475 
    10476  /*
    10477  Under certain condition, this whole section can be skipped for optimization, so
    10478  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10479  e.g. for custom pools with linear algorithm.
    10480  */
    10481  if(!canMakeOtherLost || canCreateNewBlock)
    10482  {
    10483  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10484  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10486 
    10487  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10488  {
    10489  // Use only last block.
    10490  if(!m_Blocks.empty())
    10491  {
    10492  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10493  VMA_ASSERT(pCurrBlock);
    10494  VkResult res = AllocateFromBlock(
    10495  pCurrBlock,
    10496  hCurrentPool,
    10497  currentFrameIndex,
    10498  size,
    10499  alignment,
    10500  allocFlagsCopy,
    10501  createInfo.pUserData,
    10502  suballocType,
    10503  strategy,
    10504  pAllocation);
    10505  if(res == VK_SUCCESS)
    10506  {
    10507  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10508  return VK_SUCCESS;
    10509  }
    10510  }
    10511  }
    10512  else
    10513  {
    10515  {
    10516  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10517  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10518  {
    10519  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10520  VMA_ASSERT(pCurrBlock);
    10521  VkResult res = AllocateFromBlock(
    10522  pCurrBlock,
    10523  hCurrentPool,
    10524  currentFrameIndex,
    10525  size,
    10526  alignment,
    10527  allocFlagsCopy,
    10528  createInfo.pUserData,
    10529  suballocType,
    10530  strategy,
    10531  pAllocation);
    10532  if(res == VK_SUCCESS)
    10533  {
    10534  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10535  return VK_SUCCESS;
    10536  }
    10537  }
    10538  }
    10539  else // WORST_FIT, FIRST_FIT
    10540  {
    10541  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10542  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10543  {
    10544  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10545  VMA_ASSERT(pCurrBlock);
    10546  VkResult res = AllocateFromBlock(
    10547  pCurrBlock,
    10548  hCurrentPool,
    10549  currentFrameIndex,
    10550  size,
    10551  alignment,
    10552  allocFlagsCopy,
    10553  createInfo.pUserData,
    10554  suballocType,
    10555  strategy,
    10556  pAllocation);
    10557  if(res == VK_SUCCESS)
    10558  {
    10559  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10560  return VK_SUCCESS;
    10561  }
    10562  }
    10563  }
    10564  }
    10565 
    10566  // 2. Try to create new block.
    10567  if(canCreateNewBlock)
    10568  {
    10569  // Calculate optimal size for new block.
    10570  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10571  uint32_t newBlockSizeShift = 0;
    10572  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10573 
    10574  if(!m_ExplicitBlockSize)
    10575  {
    10576  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10577  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10578  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10579  {
    10580  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10581  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10582  {
    10583  newBlockSize = smallerNewBlockSize;
    10584  ++newBlockSizeShift;
    10585  }
    10586  else
    10587  {
    10588  break;
    10589  }
    10590  }
    10591  }
    10592 
    10593  size_t newBlockIndex = 0;
    10594  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10595  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10596  if(!m_ExplicitBlockSize)
    10597  {
    10598  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10599  {
    10600  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10601  if(smallerNewBlockSize >= size)
    10602  {
    10603  newBlockSize = smallerNewBlockSize;
    10604  ++newBlockSizeShift;
    10605  res = CreateBlock(newBlockSize, &newBlockIndex);
    10606  }
    10607  else
    10608  {
    10609  break;
    10610  }
    10611  }
    10612  }
    10613 
    10614  if(res == VK_SUCCESS)
    10615  {
    10616  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10617  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10618 
    10619  res = AllocateFromBlock(
    10620  pBlock,
    10621  hCurrentPool,
    10622  currentFrameIndex,
    10623  size,
    10624  alignment,
    10625  allocFlagsCopy,
    10626  createInfo.pUserData,
    10627  suballocType,
    10628  strategy,
    10629  pAllocation);
    10630  if(res == VK_SUCCESS)
    10631  {
    10632  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10633  return VK_SUCCESS;
    10634  }
    10635  else
    10636  {
    10637  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10638  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10639  }
    10640  }
    10641  }
    10642  }
    10643 
    10644  // 3. Try to allocate from existing blocks with making other allocations lost.
    10645  if(canMakeOtherLost)
    10646  {
    10647  uint32_t tryIndex = 0;
    10648  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10649  {
    10650  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10651  VmaAllocationRequest bestRequest = {};
    10652  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10653 
    10654  // 1. Search existing allocations.
    10656  {
    10657  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10658  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10659  {
    10660  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10661  VMA_ASSERT(pCurrBlock);
    10662  VmaAllocationRequest currRequest = {};
    10663  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10664  currentFrameIndex,
    10665  m_FrameInUseCount,
    10666  m_BufferImageGranularity,
    10667  size,
    10668  alignment,
    10669  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10670  suballocType,
    10671  canMakeOtherLost,
    10672  strategy,
    10673  &currRequest))
    10674  {
    10675  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10676  if(pBestRequestBlock == VMA_NULL ||
    10677  currRequestCost < bestRequestCost)
    10678  {
    10679  pBestRequestBlock = pCurrBlock;
    10680  bestRequest = currRequest;
    10681  bestRequestCost = currRequestCost;
    10682 
    10683  if(bestRequestCost == 0)
    10684  {
    10685  break;
    10686  }
    10687  }
    10688  }
    10689  }
    10690  }
    10691  else // WORST_FIT, FIRST_FIT
    10692  {
    10693  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10694  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10695  {
    10696  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10697  VMA_ASSERT(pCurrBlock);
    10698  VmaAllocationRequest currRequest = {};
    10699  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10700  currentFrameIndex,
    10701  m_FrameInUseCount,
    10702  m_BufferImageGranularity,
    10703  size,
    10704  alignment,
    10705  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10706  suballocType,
    10707  canMakeOtherLost,
    10708  strategy,
    10709  &currRequest))
    10710  {
    10711  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10712  if(pBestRequestBlock == VMA_NULL ||
    10713  currRequestCost < bestRequestCost ||
    10715  {
    10716  pBestRequestBlock = pCurrBlock;
    10717  bestRequest = currRequest;
    10718  bestRequestCost = currRequestCost;
    10719 
    10720  if(bestRequestCost == 0 ||
    10722  {
    10723  break;
    10724  }
    10725  }
    10726  }
    10727  }
    10728  }
    10729 
    10730  if(pBestRequestBlock != VMA_NULL)
    10731  {
    10732  if(mapped)
    10733  {
    10734  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10735  if(res != VK_SUCCESS)
    10736  {
    10737  return res;
    10738  }
    10739  }
    10740 
    10741  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10742  currentFrameIndex,
    10743  m_FrameInUseCount,
    10744  &bestRequest))
    10745  {
    10746  // We no longer have an empty Allocation.
    10747  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10748  {
    10749  m_HasEmptyBlock = false;
    10750  }
    10751  // Allocate from this pBlock.
    10752  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10753  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10754  (*pAllocation)->InitBlockAllocation(
    10755  hCurrentPool,
    10756  pBestRequestBlock,
    10757  bestRequest.offset,
    10758  alignment,
    10759  size,
    10760  suballocType,
    10761  mapped,
    10762  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10763  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10764  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10765  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10766  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10767  {
    10768  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10769  }
    10770  if(IsCorruptionDetectionEnabled())
    10771  {
    10772  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10773  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10774  }
    10775  return VK_SUCCESS;
    10776  }
    10777  // else: Some allocations must have been touched while we are here. Next try.
    10778  }
    10779  else
    10780  {
    10781  // Could not find place in any of the blocks - break outer loop.
    10782  break;
    10783  }
    10784  }
    10785  /* Maximum number of tries exceeded - a very unlike event when many other
    10786  threads are simultaneously touching allocations making it impossible to make
    10787  lost at the same time as we try to allocate. */
    10788  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10789  {
    10790  return VK_ERROR_TOO_MANY_OBJECTS;
    10791  }
    10792  }
    10793 
    10794  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10795 }
    10796 
    10797 void VmaBlockVector::Free(
    10798  VmaAllocation hAllocation)
    10799 {
    10800  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10801 
    10802  // Scope for lock.
    10803  {
    10804  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10805 
    10806  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10807 
    10808  if(IsCorruptionDetectionEnabled())
    10809  {
    10810  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10811  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10812  }
    10813 
    10814  if(hAllocation->IsPersistentMap())
    10815  {
    10816  pBlock->Unmap(m_hAllocator, 1);
    10817  }
    10818 
    10819  pBlock->m_pMetadata->Free(hAllocation);
    10820  VMA_HEAVY_ASSERT(pBlock->Validate());
    10821 
    10822  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10823 
    10824  // pBlock became empty after this deallocation.
    10825  if(pBlock->m_pMetadata->IsEmpty())
    10826  {
    10827  // Already has empty Allocation. We don't want to have two, so delete this one.
    10828  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10829  {
    10830  pBlockToDelete = pBlock;
    10831  Remove(pBlock);
    10832  }
    10833  // We now have first empty block.
    10834  else
    10835  {
    10836  m_HasEmptyBlock = true;
    10837  }
    10838  }
    10839  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10840  // (This is optional, heuristics.)
    10841  else if(m_HasEmptyBlock)
    10842  {
    10843  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10844  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10845  {
    10846  pBlockToDelete = pLastBlock;
    10847  m_Blocks.pop_back();
    10848  m_HasEmptyBlock = false;
    10849  }
    10850  }
    10851 
    10852  IncrementallySortBlocks();
    10853  }
    10854 
    10855  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10856  // lock, for performance reason.
    10857  if(pBlockToDelete != VMA_NULL)
    10858  {
    10859  VMA_DEBUG_LOG(" Deleted empty allocation");
    10860  pBlockToDelete->Destroy(m_hAllocator);
    10861  vma_delete(m_hAllocator, pBlockToDelete);
    10862  }
    10863 }
    10864 
    10865 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10866 {
    10867  VkDeviceSize result = 0;
    10868  for(size_t i = m_Blocks.size(); i--; )
    10869  {
    10870  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10871  if(result >= m_PreferredBlockSize)
    10872  {
    10873  break;
    10874  }
    10875  }
    10876  return result;
    10877 }
    10878 
    10879 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10880 {
    10881  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10882  {
    10883  if(m_Blocks[blockIndex] == pBlock)
    10884  {
    10885  VmaVectorRemove(m_Blocks, blockIndex);
    10886  return;
    10887  }
    10888  }
    10889  VMA_ASSERT(0);
    10890 }
    10891 
    10892 void VmaBlockVector::IncrementallySortBlocks()
    10893 {
    10894  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10895  {
    10896  // Bubble sort only until first swap.
    10897  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10898  {
    10899  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10900  {
    10901  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10902  return;
    10903  }
    10904  }
    10905  }
    10906 }
    10907 
// Attempts a single allocation from the given existing block, never making
// other allocations lost. Returns VK_SUCCESS and fills *pAllocation on
// success, VK_ERROR_OUT_OF_DEVICE_MEMORY when the block has no suitable free
// region, or the error from mapping the block if persistent mapping fails.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // This path must not be used with CAN_MAKE_OTHER_LOST - that is handled by the caller.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    // Ask the block's metadata for a free region matching size/alignment/strategy.
    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Increment the block's persistent-map counter up front if requested;
        // bail out before touching any bookkeeping if mapping fails.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Create the allocation object and commit the request into block metadata.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optionally fill new memory with a debug pattern to expose reads of uninitialized data.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Write magic values around the allocation for later corruption checks.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10982 
    10983 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10984 {
    10985  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10986  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10987  allocInfo.allocationSize = blockSize;
    10988  VkDeviceMemory mem = VK_NULL_HANDLE;
    10989  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10990  if(res < 0)
    10991  {
    10992  return res;
    10993  }
    10994 
    10995  // New VkDeviceMemory successfully created.
    10996 
    10997  // Create new Allocation for it.
    10998  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10999  pBlock->Init(
    11000  m_hAllocator,
    11001  m_MemoryTypeIndex,
    11002  mem,
    11003  allocInfo.allocationSize,
    11004  m_NextBlockId++,
    11005  m_Algorithm);
    11006 
    11007  m_Blocks.push_back(pBlock);
    11008  if(pNewBlockIndex != VMA_NULL)
    11009  {
    11010  *pNewBlockIndex = m_Blocks.size() - 1;
    11011  }
    11012 
    11013  return VK_SUCCESS;
    11014 }
    11015 
    11016 #if VMA_STATS_STRING_ENABLED
    11017 
// Serializes the state of this block vector as a JSON object into the given
// writer: pool configuration (custom pools report more fields than default
// ones) followed by a "Blocks" object keyed by each block's numeric id.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        // Custom pools report their full configuration.
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Block count is written as a nested object with optional Min/Max bounds.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        // Only report the algorithm when it differs from the default (0).
        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        // Default (non-custom) pools report only the preferred block size.
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // Each block's detailed map, keyed by the block's id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    11080 
    11081 #endif // #if VMA_STATS_STRING_ENABLED
    11082 
    11083 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    11084  VmaAllocator hAllocator,
    11085  uint32_t currentFrameIndex)
    11086 {
    11087  if(m_pDefragmentator == VMA_NULL)
    11088  {
    11089  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11090  hAllocator,
    11091  this,
    11092  currentFrameIndex);
    11093  }
    11094 
    11095  return m_pDefragmentator;
    11096 }
    11097 
// Runs defragmentation using the defragmentator previously created with
// EnsureDefragmentator(): moves allocations between blocks within the given
// byte/allocation budgets, accumulates optional statistics, then destroys
// blocks that became empty. The budgets are decremented by the amount used.
// Returns VK_SUCCESS when there was nothing to do or the result of the
// underlying VmaDefragmentator::Defragment call.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    // No defragmentator created - nothing to do.
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Reduce the caller's remaining budgets by what this vector consumed.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    m_HasEmptyBlock = false;
    // Iterate backward so VmaVectorRemove doesn't shift indices not yet visited.
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Can't go below the minimum block count - keep this empty block.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    11154 
    11155 void VmaBlockVector::DestroyDefragmentator()
    11156 {
    11157  if(m_pDefragmentator != VMA_NULL)
    11158  {
    11159  vma_delete(m_hAllocator, m_pDefragmentator);
    11160  m_pDefragmentator = VMA_NULL;
    11161  }
    11162 }
    11163 
    11164 void VmaBlockVector::MakePoolAllocationsLost(
    11165  uint32_t currentFrameIndex,
    11166  size_t* pLostAllocationCount)
    11167 {
    11168  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11169  size_t lostAllocationCount = 0;
    11170  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11171  {
    11172  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11173  VMA_ASSERT(pBlock);
    11174  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11175  }
    11176  if(pLostAllocationCount != VMA_NULL)
    11177  {
    11178  *pLostAllocationCount = lostAllocationCount;
    11179  }
    11180 }
    11181 
    11182 VkResult VmaBlockVector::CheckCorruption()
    11183 {
    11184  if(!IsCorruptionDetectionEnabled())
    11185  {
    11186  return VK_ERROR_FEATURE_NOT_PRESENT;
    11187  }
    11188 
    11189  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11190  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11191  {
    11192  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11193  VMA_ASSERT(pBlock);
    11194  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11195  if(res != VK_SUCCESS)
    11196  {
    11197  return res;
    11198  }
    11199  }
    11200  return VK_SUCCESS;
    11201 }
    11202 
    11203 void VmaBlockVector::AddStats(VmaStats* pStats)
    11204 {
    11205  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11206  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11207 
    11208  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11209 
    11210  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11211  {
    11212  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11213  VMA_ASSERT(pBlock);
    11214  VMA_HEAVY_ASSERT(pBlock->Validate());
    11215  VmaStatInfo allocationStatInfo;
    11216  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11217  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11218  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11219  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11220  }
    11221 }
    11222 
    11224 // VmaDefragmentator members definition
    11225 
// Constructs a defragmentator bound to a single block vector.
// The allocation and block-info lists use the allocator's own callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation is only supported for the default algorithm (0);
    // e.g. linear pools must not be defragmented.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11240 
    11241 VmaDefragmentator::~VmaDefragmentator()
    11242 {
    11243  for(size_t i = m_Blocks.size(); i--; )
    11244  {
    11245  vma_delete(m_hAllocator, m_Blocks[i]);
    11246  }
    11247 }
    11248 
    11249 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11250 {
    11251  AllocationInfo allocInfo;
    11252  allocInfo.m_hAllocation = hAlloc;
    11253  allocInfo.m_pChanged = pChanged;
    11254  m_Allocations.push_back(allocInfo);
    11255 }
    11256 
    11257 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11258 {
    11259  // It has already been mapped for defragmentation.
    11260  if(m_pMappedDataForDefragmentation)
    11261  {
    11262  *ppMappedData = m_pMappedDataForDefragmentation;
    11263  return VK_SUCCESS;
    11264  }
    11265 
    11266  // It is originally mapped.
    11267  if(m_pBlock->GetMappedData())
    11268  {
    11269  *ppMappedData = m_pBlock->GetMappedData();
    11270  return VK_SUCCESS;
    11271  }
    11272 
    11273  // Map on first usage.
    11274  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11275  *ppMappedData = m_pMappedDataForDefragmentation;
    11276  return res;
    11277 }
    11278 
    11279 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11280 {
    11281  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11282  {
    11283  m_pBlock->Unmap(hAllocator, 1);
    11284  }
    11285 }
    11286 
    11287 VkResult VmaDefragmentator::DefragmentRound(
    11288  VkDeviceSize maxBytesToMove,
    11289  uint32_t maxAllocationsToMove)
    11290 {
    11291  if(m_Blocks.empty())
    11292  {
    11293  return VK_SUCCESS;
    11294  }
    11295 
    11296  size_t srcBlockIndex = m_Blocks.size() - 1;
    11297  size_t srcAllocIndex = SIZE_MAX;
    11298  for(;;)
    11299  {
    11300  // 1. Find next allocation to move.
    11301  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    11302  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    11303  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    11304  {
    11305  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    11306  {
    11307  // Finished: no more allocations to process.
    11308  if(srcBlockIndex == 0)
    11309  {
    11310  return VK_SUCCESS;
    11311  }
    11312  else
    11313  {
    11314  --srcBlockIndex;
    11315  srcAllocIndex = SIZE_MAX;
    11316  }
    11317  }
    11318  else
    11319  {
    11320  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    11321  }
    11322  }
    11323 
    11324  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    11325  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    11326 
    11327  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    11328  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    11329  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    11330  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    11331 
    11332  // 2. Try to find new place for this allocation in preceding or current block.
    11333  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    11334  {
    11335  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    11336  VmaAllocationRequest dstAllocRequest;
    11337  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    11338  m_CurrentFrameIndex,
    11339  m_pBlockVector->GetFrameInUseCount(),
    11340  m_pBlockVector->GetBufferImageGranularity(),
    11341  size,
    11342  alignment,
    11343  false, // upperAddress
    11344  suballocType,
    11345  false, // canMakeOtherLost
    11347  &dstAllocRequest) &&
    11348  MoveMakesSense(
    11349  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    11350  {
    11351  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    11352 
    11353  // Reached limit on number of allocations or bytes to move.
    11354  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    11355  (m_BytesMoved + size > maxBytesToMove))
    11356  {
    11357  return VK_INCOMPLETE;
    11358  }
    11359 
    11360  void* pDstMappedData = VMA_NULL;
    11361  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    11362  if(res != VK_SUCCESS)
    11363  {
    11364  return res;
    11365  }
    11366 
    11367  void* pSrcMappedData = VMA_NULL;
    11368  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    11369  if(res != VK_SUCCESS)
    11370  {
    11371  return res;
    11372  }
    11373 
    11374  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    11375  memcpy(
    11376  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    11377  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    11378  static_cast<size_t>(size));
    11379 
    11380  if(VMA_DEBUG_MARGIN > 0)
    11381  {
    11382  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
    11383  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
    11384  }
    11385 
    11386  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    11387  dstAllocRequest,
    11388  suballocType,
    11389  size,
    11390  false, // upperAddress
    11391  allocInfo.m_hAllocation);
    11392  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    11393 
    11394  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    11395 
    11396  if(allocInfo.m_pChanged != VMA_NULL)
    11397  {
    11398  *allocInfo.m_pChanged = VK_TRUE;
    11399  }
    11400 
    11401  ++m_AllocationsMoved;
    11402  m_BytesMoved += size;
    11403 
    11404  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    11405 
    11406  break;
    11407  }
    11408  }
    11409 
    11410  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    11411 
    11412  if(srcAllocIndex > 0)
    11413  {
    11414  --srcAllocIndex;
    11415  }
    11416  else
    11417  {
    11418  if(srcBlockIndex > 0)
    11419  {
    11420  --srcBlockIndex;
    11421  srcAllocIndex = SIZE_MAX;
    11422  }
    11423  else
    11424  {
    11425  return VK_SUCCESS;
    11426  }
    11427  }
    11428  }
    11429 }
    11430 
// Performs the whole defragmentation of blocks registered in m_Allocations,
// limited by maxBytesToMove / maxAllocationsToMove.
// Returns VK_SUCCESS when done, VK_INCOMPLETE when a limit was hit,
// or an error code from mapping memory.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Nothing was registered for defragmentation - trivially done.
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value, so allocations can be matched to
    // their blocks below using binary search.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of the blocks collected above.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    // Precompute per-block data used by the sorting criterion below.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    // A second round can move allocations into space freed by the first;
    // stop early if a round returns VK_INCOMPLETE or an error.
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11498 
    11499 bool VmaDefragmentator::MoveMakesSense(
    11500  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11501  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11502 {
    11503  if(dstBlockIndex < srcBlockIndex)
    11504  {
    11505  return true;
    11506  }
    11507  if(dstBlockIndex > srcBlockIndex)
    11508  {
    11509  return false;
    11510  }
    11511  if(dstOffset < srcOffset)
    11512  {
    11513  return true;
    11514  }
    11515  return false;
    11516 }
    11517 
    11519 // VmaRecorder
    11520 
    11521 #if VMA_RECORDING_ENABLED
    11522 
// Constructs the recorder in a "not initialized" state.
// Real setup (opening the file, timer calibration) happens in Init().
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL), // No file open until Init() succeeds.
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11531 
// Initializes the recorder: captures timer calibration data and opens the
// output file given in settings.pFilePath for binary writing.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
// NOTE(review): uses Windows-only APIs (QueryPerformance*, fopen_s) -
// recording is presumably Windows-only; confirm against VMA_RECORDING_ENABLED docs.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Calibrate the high-resolution timer; GetBasicParams() later converts
    // counter deltas to seconds using m_Freq and m_StartCounter.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: file magic line followed by format version "major,minor".
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,4");

    return VK_SUCCESS;
}
    11553 
    11554 VmaRecorder::~VmaRecorder()
    11555 {
    11556  if(m_File != VMA_NULL)
    11557  {
    11558  fclose(m_File);
    11559  }
    11560 }
    11561 
// Appends a "vmaCreateAllocator" entry to the recording file.
// CSV line format: threadId,time,frameIndex,functionName.
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams); // Fills thread id and time since Init().

    // Serializes concurrent writers; the lock is a no-op when m_UseMutex is false.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush(); // fflush only if VMA_RECORD_FLUSH_AFTER_CALL_BIT is set.
}
    11571 
// Appends a "vmaDestroyAllocator" entry to the recording file.
// CSV line format: threadId,time,frameIndex,functionName.
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11581 
// Appends a "vmaCreatePool" entry: the pool creation parameters followed by
// the resulting pool handle (recorded as a pointer, used as an id on replay).
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount, // size_t widths vary; force 64-bit for %llu.
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11598 
// Appends a "vmaDestroyPool" entry identifying the pool by its handle.
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11609 
// Appends a "vmaAllocateMemory" entry: memory requirements, allocation
// creation parameters, the resulting allocation handle, and user data string.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Formats pUserData either as a string (copy-string flag) or as a pointer.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11634 
// Appends a "vmaAllocateMemoryForBuffer" entry. Same data as
// RecordAllocateMemory plus the dedicated-allocation hints (as 0/1 flags).
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11663 
// Appends a "vmaAllocateMemoryForImage" entry. Mirrors
// RecordAllocateMemoryForBuffer but with the image-specific function name.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11692 
// Appends a "vmaFreeMemory" entry identifying the allocation by its handle.
void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11704 
// Appends a "vmaResizeAllocation" entry: the allocation handle and its new size.
void VmaRecorder::RecordResizeAllocation(
    uint32_t frameIndex,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation, newSize);
    Flush();
}
    11718 
// Appends a "vmaSetAllocationUserData" entry. The user data is serialized as
// a string when the allocation was created with the copy-string flag,
// otherwise as a raw pointer value.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11735 
// Appends a "vmaCreateLostAllocation" entry for the given allocation handle.
void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11747 
// Appends a "vmaMapMemory" entry for the given allocation handle.
void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11759 
// Appends a "vmaUnmapMemory" entry for the given allocation handle.
void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11771 
// Appends a "vmaFlushAllocation" entry: allocation handle, offset and size of
// the flushed range.
void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11785 
// Appends a "vmaInvalidateAllocation" entry: allocation handle, offset and
// size of the invalidated range.
void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11799 
// Appends a "vmaCreateBuffer" entry: buffer creation parameters, allocation
// creation parameters, the resulting allocation handle, and user data string.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11825 
// Appends a "vmaCreateImage" entry: the full set of image creation
// parameters, allocation creation parameters, the resulting allocation
// handle, and user data string.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11860 
// Appends a "vmaDestroyBuffer" entry for the given allocation handle.
void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11872 
// Appends a "vmaDestroyImage" entry for the given allocation handle.
void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11884 
// Appends a "vmaTouchAllocation" entry for the given allocation handle.
void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11896 
// Appends a "vmaGetAllocationInfo" entry for the given allocation handle.
void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11908 
// Appends a "vmaMakePoolAllocationsLost" entry for the given pool handle.
void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11920 
    11921 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11922 {
    11923  if(pUserData != VMA_NULL)
    11924  {
    11925  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11926  {
    11927  m_Str = (const char*)pUserData;
    11928  }
    11929  else
    11930  {
    11931  sprintf_s(m_PtrStr, "%p", pUserData);
    11932  m_Str = m_PtrStr;
    11933  }
    11934  }
    11935  else
    11936  {
    11937  m_Str = "";
    11938  }
    11939 }
    11940 
// Writes the "Config,Begin" ... "Config,End" section of the recording file:
// physical device properties and limits, memory heaps/types, enabled
// extensions, and the compile-time VMA_* macro values - everything a replay
// tool needs to reproduce the environment.
// NOTE(review): no locking here - presumably called only from Init-time code
// before concurrent recording starts; confirm at call sites.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heaps: size and flags for each.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    // Memory types: owning heap index and property flags for each.
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11986 
// Fills outParams with the data common to every recorded entry:
// the calling thread id and the time in seconds elapsed since Init(),
// derived from the QueryPerformance* calibration captured there.
void VmaRecorder::GetBasicParams(CallParams& outParams)
{
    outParams.threadId = GetCurrentThreadId();

    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    // Counter ticks since Init() divided by ticks-per-second = seconds.
    outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
}
    11995 
    11996 void VmaRecorder::Flush()
    11997 {
    11998  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11999  {
    12000  fflush(m_File);
    12001  }
    12002 }
    12003 
    12004 #endif // #if VMA_RECORDING_ENABLED
    12005 
    12007 // VmaAllocator_T
    12008 
    12009 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    12010  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    12011  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    12012  m_hDevice(pCreateInfo->device),
    12013  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    12014  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    12015  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    12016  m_PreferredLargeHeapBlockSize(0),
    12017  m_PhysicalDevice(pCreateInfo->physicalDevice),
    12018  m_CurrentFrameIndex(0),
    12019  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    12020  m_NextPoolId(0)
    12022  ,m_pRecorder(VMA_NULL)
    12023 #endif
    12024 {
    12025  if(VMA_DEBUG_DETECT_CORRUPTION)
    12026  {
    12027  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    12028  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    12029  }
    12030 
    12031  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    12032 
    12033 #if !(VMA_DEDICATED_ALLOCATION)
    12035  {
    12036  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    12037  }
    12038 #endif
    12039 
    12040  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    12041  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    12042  memset(&m_MemProps, 0, sizeof(m_MemProps));
    12043 
    12044  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    12045  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    12046 
    12047  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12048  {
    12049  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    12050  }
    12051 
    12052  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    12053  {
    12054  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    12055  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    12056  }
    12057 
    12058  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    12059 
    12060  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    12061  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    12062 
    12063  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    12064  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    12065  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    12066  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    12067 
    12068  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    12069  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    12070 
    12071  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    12072  {
    12073  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    12074  {
    12075  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    12076  if(limit != VK_WHOLE_SIZE)
    12077  {
    12078  m_HeapSizeLimit[heapIndex] = limit;
    12079  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    12080  {
    12081  m_MemProps.memoryHeaps[heapIndex].size = limit;
    12082  }
    12083  }
    12084  }
    12085  }
    12086 
    12087  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12088  {
    12089  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    12090 
    12091  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    12092  this,
    12093  memTypeIndex,
    12094  preferredBlockSize,
    12095  0,
    12096  SIZE_MAX,
    12097  GetBufferImageGranularity(),
    12098  pCreateInfo->frameInUseCount,
    12099  false, // isCustomPool
    12100  false, // explicitBlockSize
    12101  false); // linearAlgorithm
    12102  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12103  // becase minBlockCount is 0.
    12104  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12105 
    12106  }
    12107 }
    12108 
// Second-phase initialization, separate from the constructor so it can
// return an error code: creates and initializes the call recorder when
// pRecordSettings requests it.
// Returns VK_SUCCESS, the recorder's error, or VK_ERROR_FEATURE_NOT_PRESENT
// when recording is requested but compiled out.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    // Recording is requested only when a non-empty file path is given.
    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Write the environment snapshot, then the first call entry.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
    12136 
    12137 VmaAllocator_T::~VmaAllocator_T()
    12138 {
    12139 #if VMA_RECORDING_ENABLED
    12140  if(m_pRecorder != VMA_NULL)
    12141  {
    12142  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    12143  vma_delete(this, m_pRecorder);
    12144  }
    12145 #endif
    12146 
    12147  VMA_ASSERT(m_Pools.empty());
    12148 
    12149  for(size_t i = GetMemoryTypeCount(); i--; )
    12150  {
    12151  vma_delete(this, m_pDedicatedAllocations[i]);
    12152  vma_delete(this, m_pBlockVectors[i]);
    12153  }
    12154 }
    12155 
// Fills m_VulkanFunctions with pointers to all Vulkan entry points the allocator uses.
// Sources, in order of increasing precedence:
// 1. When VMA_STATIC_VULKAN_FUNCTIONS == 1: addresses of statically linked functions,
//    plus vkGetDeviceProcAddr lookups for the VK_KHR_dedicated_allocation pair.
// 2. pVulkanFunctions (may be null): every non-null member overrides the value above.
// Ends with asserts that every required pointer is non-null - if they fire, the caller
// must either enable static functions or pass valid pointers in
// VmaAllocatorCreateInfo::pVulkanFunctions.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    // Take addresses of the statically linked Vulkan functions.
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // Extension functions are not statically linked - fetch them from the device.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one member from pVulkanFunctions if the user provided it.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    // User-provided pointers override the statically imported ones.
    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12241 
    12242 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12243 {
    12244  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12245  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12246  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12247  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12248 }
    12249 
    12250 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12251  VkDeviceSize size,
    12252  VkDeviceSize alignment,
    12253  bool dedicatedAllocation,
    12254  VkBuffer dedicatedBuffer,
    12255  VkImage dedicatedImage,
    12256  const VmaAllocationCreateInfo& createInfo,
    12257  uint32_t memTypeIndex,
    12258  VmaSuballocationType suballocType,
    12259  VmaAllocation* pAllocation)
    12260 {
    12261  VMA_ASSERT(pAllocation != VMA_NULL);
    12262  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12263 
    12264  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12265 
    12266  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12267  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12268  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12269  {
    12270  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12271  }
    12272 
    12273  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12274  VMA_ASSERT(blockVector);
    12275 
    12276  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12277  bool preferDedicatedMemory =
    12278  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12279  dedicatedAllocation ||
    12280  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12281  size > preferredBlockSize / 2;
    12282 
    12283  if(preferDedicatedMemory &&
    12284  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12285  finalCreateInfo.pool == VK_NULL_HANDLE)
    12286  {
    12288  }
    12289 
    12290  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12291  {
    12292  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12293  {
    12294  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12295  }
    12296  else
    12297  {
    12298  return AllocateDedicatedMemory(
    12299  size,
    12300  suballocType,
    12301  memTypeIndex,
    12302  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12303  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12304  finalCreateInfo.pUserData,
    12305  dedicatedBuffer,
    12306  dedicatedImage,
    12307  pAllocation);
    12308  }
    12309  }
    12310  else
    12311  {
    12312  VkResult res = blockVector->Allocate(
    12313  VK_NULL_HANDLE, // hCurrentPool
    12314  m_CurrentFrameIndex.load(),
    12315  size,
    12316  alignment,
    12317  finalCreateInfo,
    12318  suballocType,
    12319  pAllocation);
    12320  if(res == VK_SUCCESS)
    12321  {
    12322  return res;
    12323  }
    12324 
    12325  // 5. Try dedicated memory.
    12326  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12327  {
    12328  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12329  }
    12330  else
    12331  {
    12332  res = AllocateDedicatedMemory(
    12333  size,
    12334  suballocType,
    12335  memTypeIndex,
    12336  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12337  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12338  finalCreateInfo.pUserData,
    12339  dedicatedBuffer,
    12340  dedicatedImage,
    12341  pAllocation);
    12342  if(res == VK_SUCCESS)
    12343  {
    12344  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12345  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12346  return VK_SUCCESS;
    12347  }
    12348  else
    12349  {
    12350  // Everything failed: Return error code.
    12351  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12352  return res;
    12353  }
    12354  }
    12355  }
    12356 }
    12357 
// Allocates a whole dedicated VkDeviceMemory object for a single allocation.
// Steps: build VkMemoryAllocateInfo (chaining VkMemoryDedicatedAllocateInfoKHR
// when the KHR_dedicated_allocation extension is in use), call vkAllocateMemory,
// optionally map the memory persistently, create the VmaAllocation_T object, and
// register it in m_pDedicatedAllocations[memTypeIndex].
//
// map - if true, the memory is mapped immediately and stays mapped.
// isUserDataString - if true, pUserData is copied as a string by SetUserData.
// dedicatedBuffer / dedicatedImage - at most one may be non-null; used for
//     VkMemoryDedicatedAllocateInfoKHR.
// pAllocation - [out] receives the new allocation on success.
// On failure the VkDeviceMemory is released before returning the error code.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain the dedicated-allocation info if the extension is enabled and the
    // caller passed the buffer/image this memory is dedicated to.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map the memory if requested.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed - release the just-allocated memory before bailing out.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12440 
// Queries memory requirements for a buffer. When VK_KHR_dedicated_allocation is
// in use, queries via vkGetBufferMemoryRequirements2KHR and also reports whether
// the driver requires or prefers a dedicated allocation for this buffer.
// Without the extension both dedicated-allocation flags are reported as false.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        // Chained into memReq2.pNext so the driver fills the dedicated-allocation flags.
        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        // Plain Vulkan 1.0 path - no dedicated-allocation information available.
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
    12472 
// Image counterpart of GetBufferMemoryRequirements. When VK_KHR_dedicated_allocation
// is in use, queries via vkGetImageMemoryRequirements2KHR and reports the driver's
// dedicated-allocation requirements; otherwise both flags are reported as false.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        // Chained into memReq2.pNext so the driver fills the dedicated-allocation flags.
        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        // Plain Vulkan 1.0 path - no dedicated-allocation information available.
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
    12504 
    12505 VkResult VmaAllocator_T::AllocateMemory(
    12506  const VkMemoryRequirements& vkMemReq,
    12507  bool requiresDedicatedAllocation,
    12508  bool prefersDedicatedAllocation,
    12509  VkBuffer dedicatedBuffer,
    12510  VkImage dedicatedImage,
    12511  const VmaAllocationCreateInfo& createInfo,
    12512  VmaSuballocationType suballocType,
    12513  VmaAllocation* pAllocation)
    12514 {
    12515  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12516 
    12517  if(vkMemReq.size == 0)
    12518  {
    12519  return VK_ERROR_VALIDATION_FAILED_EXT;
    12520  }
    12521  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12522  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12523  {
    12524  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12525  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12526  }
    12527  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12529  {
    12530  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12531  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12532  }
    12533  if(requiresDedicatedAllocation)
    12534  {
    12535  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12536  {
    12537  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12538  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12539  }
    12540  if(createInfo.pool != VK_NULL_HANDLE)
    12541  {
    12542  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12543  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12544  }
    12545  }
    12546  if((createInfo.pool != VK_NULL_HANDLE) &&
    12547  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12548  {
    12549  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12550  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12551  }
    12552 
    12553  if(createInfo.pool != VK_NULL_HANDLE)
    12554  {
    12555  const VkDeviceSize alignmentForPool = VMA_MAX(
    12556  vkMemReq.alignment,
    12557  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12558  return createInfo.pool->m_BlockVector.Allocate(
    12559  createInfo.pool,
    12560  m_CurrentFrameIndex.load(),
    12561  vkMemReq.size,
    12562  alignmentForPool,
    12563  createInfo,
    12564  suballocType,
    12565  pAllocation);
    12566  }
    12567  else
    12568  {
    12569  // Bit mask of memory Vulkan types acceptable for this allocation.
    12570  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12571  uint32_t memTypeIndex = UINT32_MAX;
    12572  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12573  if(res == VK_SUCCESS)
    12574  {
    12575  VkDeviceSize alignmentForMemType = VMA_MAX(
    12576  vkMemReq.alignment,
    12577  GetMemoryTypeMinAlignment(memTypeIndex));
    12578 
    12579  res = AllocateMemoryOfType(
    12580  vkMemReq.size,
    12581  alignmentForMemType,
    12582  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12583  dedicatedBuffer,
    12584  dedicatedImage,
    12585  createInfo,
    12586  memTypeIndex,
    12587  suballocType,
    12588  pAllocation);
    12589  // Succeeded on first try.
    12590  if(res == VK_SUCCESS)
    12591  {
    12592  return res;
    12593  }
    12594  // Allocation from this memory type failed. Try other compatible memory types.
    12595  else
    12596  {
    12597  for(;;)
    12598  {
    12599  // Remove old memTypeIndex from list of possibilities.
    12600  memoryTypeBits &= ~(1u << memTypeIndex);
    12601  // Find alternative memTypeIndex.
    12602  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12603  if(res == VK_SUCCESS)
    12604  {
    12605  alignmentForMemType = VMA_MAX(
    12606  vkMemReq.alignment,
    12607  GetMemoryTypeMinAlignment(memTypeIndex));
    12608 
    12609  res = AllocateMemoryOfType(
    12610  vkMemReq.size,
    12611  alignmentForMemType,
    12612  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12613  dedicatedBuffer,
    12614  dedicatedImage,
    12615  createInfo,
    12616  memTypeIndex,
    12617  suballocType,
    12618  pAllocation);
    12619  // Allocation from this alternative memory type succeeded.
    12620  if(res == VK_SUCCESS)
    12621  {
    12622  return res;
    12623  }
    12624  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12625  }
    12626  // No other matching memory type index could be found.
    12627  else
    12628  {
    12629  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12630  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12631  }
    12632  }
    12633  }
    12634  }
    12635  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12636  else
    12637  return res;
    12638  }
    12639 }
    12640 
    12641 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12642 {
    12643  VMA_ASSERT(allocation);
    12644 
    12645  if(TouchAllocation(allocation))
    12646  {
    12647  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12648  {
    12649  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12650  }
    12651 
    12652  switch(allocation->GetType())
    12653  {
    12654  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12655  {
    12656  VmaBlockVector* pBlockVector = VMA_NULL;
    12657  VmaPool hPool = allocation->GetPool();
    12658  if(hPool != VK_NULL_HANDLE)
    12659  {
    12660  pBlockVector = &hPool->m_BlockVector;
    12661  }
    12662  else
    12663  {
    12664  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12665  pBlockVector = m_pBlockVectors[memTypeIndex];
    12666  }
    12667  pBlockVector->Free(allocation);
    12668  }
    12669  break;
    12670  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12671  FreeDedicatedMemory(allocation);
    12672  break;
    12673  default:
    12674  VMA_ASSERT(0);
    12675  }
    12676  }
    12677 
    12678  allocation->SetUserData(this, VMA_NULL);
    12679  vma_delete(this, allocation);
    12680 }
    12681 
    12682 VkResult VmaAllocator_T::ResizeAllocation(
    12683  const VmaAllocation alloc,
    12684  VkDeviceSize newSize)
    12685 {
    12686  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    12687  {
    12688  return VK_ERROR_VALIDATION_FAILED_EXT;
    12689  }
    12690  if(newSize == alloc->GetSize())
    12691  {
    12692  return VK_SUCCESS;
    12693  }
    12694 
    12695  switch(alloc->GetType())
    12696  {
    12697  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12698  return VK_ERROR_FEATURE_NOT_PRESENT;
    12699  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12700  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    12701  {
    12702  alloc->ChangeSize(newSize);
    12703  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    12704  return VK_SUCCESS;
    12705  }
    12706  else
    12707  {
    12708  return VK_ERROR_OUT_OF_POOL_MEMORY;
    12709  }
    12710  default:
    12711  VMA_ASSERT(0);
    12712  return VK_ERROR_VALIDATION_FAILED_EXT;
    12713  }
    12714 }
    12715 
// Gathers statistics from the default per-memory-type pools, all custom pools,
// and all dedicated allocations into *pStats, aggregated per memory type,
// per memory heap, and in total.
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    // Initialize.
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
        VMA_ASSERT(pBlockVector);
        pBlockVector->AddStats(pStats);
    }

    // Process custom pools.
    {
        // Scoped lock: only the pool list traversal needs m_PoolsMutex.
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
        }
    }

    // Process dedicated allocations.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        // Each memory type has its own dedicated-allocation list and mutex.
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
        {
            VmaStatInfo allocationStatInfo;
            (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
            VmaAddStatInfo(pStats->total, allocationStatInfo);
            VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
            VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
        }
    }

    // Postprocess.
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}
    12766 
// 4098 == 0x1002, the PCI-SIG vendor ID assigned to AMD.
// Presumably compared against VkPhysicalDeviceProperties::vendorID - usage is outside this chunk; verify at the call site.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12768 
// Compacts memory by moving the given allocations within their block vectors.
// Phases:
// 1. Dispatch each eligible allocation to a per-block-vector defragmentator.
// 2. Run defragmentation on default pools, then custom pools, within the
//    limits from pDefragmentationInfo.
// 3. Destroy all defragmentators.
// Allocations that are dedicated, lost, not HOST_VISIBLE|HOST_COHERENT, or
// belong to a pool with a non-default (linear/buddy) algorithm are skipped.
//
// pAllocationsChanged - [out, optional] parallel to pAllocations; set to VK_TRUE
//     for each allocation that was moved.
// pDefragmentationStats - [out, optional] accumulated statistics.
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    // Held for the whole operation so the pool list cannot change underneath us.
    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            if(pAllocBlockVector != VMA_NULL)
            {
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory. Stops early if any block vector reports failure.
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.
    // Runs even on failure, in reverse order of creation.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
    12889 
// Fills *pAllocationInfo with the current parameters of the allocation and,
// for allocations that can become lost, atomically marks the allocation as
// used in the current frame (compare-exchange on the last-use frame index).
// A lost allocation is reported with null memory/zero offset but keeps its
// size and pUserData.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Lock-free loop: retry until the last-use index is either LOST or
        // successfully advanced to the current frame.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation was lost - report empty info except size and user data.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame - report current parameters.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use frame index; on CAS failure
                // localLastUseFrameIndex was refreshed - loop again.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics build: still advance the last-use frame index so usage
        // shows up in stats. The allocation can never be LOST here.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12961 
// Marks the allocation as used in the current frame (same CAS scheme as
// GetAllocationInfo, without filling any output structure).
// Returns false if the allocation is lost, true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Lock-free loop: retry until the last-use index is either LOST or
        // successfully advanced to the current frame.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // On CAS failure localLastUseFrameIndex was refreshed - loop again.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics build: still advance the last-use frame index so usage
        // shows up in stats. The allocation can never be LOST here.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    13013 
    13014 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    13015 {
    13016  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    13017 
    13018  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    13019 
    13020  if(newCreateInfo.maxBlockCount == 0)
    13021  {
    13022  newCreateInfo.maxBlockCount = SIZE_MAX;
    13023  }
    13024  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    13025  {
    13026  return VK_ERROR_INITIALIZATION_FAILED;
    13027  }
    13028 
    13029  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    13030 
    13031  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    13032 
    13033  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    13034  if(res != VK_SUCCESS)
    13035  {
    13036  vma_delete(this, *pPool);
    13037  *pPool = VMA_NULL;
    13038  return res;
    13039  }
    13040 
    13041  // Add to m_Pools.
    13042  {
    13043  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13044  (*pPool)->SetId(m_NextPoolId++);
    13045  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    13046  }
    13047 
    13048  return VK_SUCCESS;
    13049 }
    13050 
    13051 void VmaAllocator_T::DestroyPool(VmaPool pool)
    13052 {
    13053  // Remove from m_Pools.
    13054  {
    13055  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13056  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13057  VMA_ASSERT(success && "Pool not found in Allocator.");
    13058  }
    13059 
    13060  vma_delete(this, pool);
    13061 }
    13062 
// Fills *pPoolStats with statistics of the given pool's block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    13067 
// Atomically stores the new current frame index, used by lost-allocation tracking.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    13072 
// Marks eligible allocations in the pool as lost, relative to the current
// frame index. Number of allocations marked is written to *pLostAllocationCount.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    13081 
// Delegates corruption detection (margin validation) to the pool's block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    13086 
    13087 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    13088 {
    13089  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    13090 
    13091  // Process default pools.
    13092  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13093  {
    13094  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    13095  {
    13096  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    13097  VMA_ASSERT(pBlockVector);
    13098  VkResult localRes = pBlockVector->CheckCorruption();
    13099  switch(localRes)
    13100  {
    13101  case VK_ERROR_FEATURE_NOT_PRESENT:
    13102  break;
    13103  case VK_SUCCESS:
    13104  finalRes = VK_SUCCESS;
    13105  break;
    13106  default:
    13107  return localRes;
    13108  }
    13109  }
    13110  }
    13111 
    13112  // Process custom pools.
    13113  {
    13114  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13115  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    13116  {
    13117  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    13118  {
    13119  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    13120  switch(localRes)
    13121  {
    13122  case VK_ERROR_FEATURE_NOT_PRESENT:
    13123  break;
    13124  case VK_SUCCESS:
    13125  finalRes = VK_SUCCESS;
    13126  break;
    13127  default:
    13128  return localRes;
    13129  }
    13130  }
    13131  }
    13132  }
    13133 
    13134  return finalRes;
    13135 }
    13136 
// Creates a dummy allocation that is permanently in the "lost" state.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    13142 
// Calls vkAllocateMemory, honoring an optional per-heap size limit and
// notifying the user's pfnAllocate device-memory callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // A heap size limit is active: check and update the remaining budget
        // atomically with the allocation, under the limit mutex.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // The user-configured limit would be exceeded - fail without
            // calling the driver.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
    13176 
// Frees a VkDeviceMemory object: invokes the user's pfnFree callback first
// (while the memory is still valid), calls vkFreeMemory, and returns the
// freed bytes to the heap budget if a heap size limit is active.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    13193 
    13194 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    13195 {
    13196  if(hAllocation->CanBecomeLost())
    13197  {
    13198  return VK_ERROR_MEMORY_MAP_FAILED;
    13199  }
    13200 
    13201  switch(hAllocation->GetType())
    13202  {
    13203  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13204  {
    13205  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13206  char *pBytes = VMA_NULL;
    13207  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    13208  if(res == VK_SUCCESS)
    13209  {
    13210  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    13211  hAllocation->BlockAllocMap();
    13212  }
    13213  return res;
    13214  }
    13215  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13216  return hAllocation->DedicatedAllocMap(this, ppData);
    13217  default:
    13218  VMA_ASSERT(0);
    13219  return VK_ERROR_MEMORY_MAP_FAILED;
    13220  }
    13221 }
    13222 
    13223 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    13224 {
    13225  switch(hAllocation->GetType())
    13226  {
    13227  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13228  {
    13229  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13230  hAllocation->BlockAllocUnmap();
    13231  pBlock->Unmap(this, 1);
    13232  }
    13233  break;
    13234  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13235  hAllocation->DedicatedAllocUnmap(this);
    13236  break;
    13237  default:
    13238  VMA_ASSERT(0);
    13239  }
    13240 }
    13241 
    13242 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13243 {
    13244  VkResult res = VK_SUCCESS;
    13245  switch(hAllocation->GetType())
    13246  {
    13247  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13248  res = GetVulkanFunctions().vkBindBufferMemory(
    13249  m_hDevice,
    13250  hBuffer,
    13251  hAllocation->GetMemory(),
    13252  0); //memoryOffset
    13253  break;
    13254  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13255  {
    13256  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13257  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13258  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13259  break;
    13260  }
    13261  default:
    13262  VMA_ASSERT(0);
    13263  }
    13264  return res;
    13265 }
    13266 
    13267 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13268 {
    13269  VkResult res = VK_SUCCESS;
    13270  switch(hAllocation->GetType())
    13271  {
    13272  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13273  res = GetVulkanFunctions().vkBindImageMemory(
    13274  m_hDevice,
    13275  hImage,
    13276  hAllocation->GetMemory(),
    13277  0); //memoryOffset
    13278  break;
    13279  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13280  {
    13281  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13282  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13283  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13284  break;
    13285  }
    13286  default:
    13287  VMA_ASSERT(0);
    13288  }
    13289  return res;
    13290 }
    13291 
// Flushes or invalidates (depending on op) the given range of the allocation,
// expanding it to nonCoherentAtomSize boundaries as the Vulkan spec requires
// for vkFlush/InvalidateMappedMemoryRanges.
// Does nothing when size == 0 or the memory type is host-coherent.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align the start down; VK_WHOLE_SIZE means "to the end of the allocation".
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Align the end up, but never past the end of the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        // Dispatch the actual cache operation on the computed range.
        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13367 
    13368 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13369 {
    13370  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13371 
    13372  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13373  {
    13374  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13375  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13376  VMA_ASSERT(pDedicatedAllocations);
    13377  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13378  VMA_ASSERT(success);
    13379  }
    13380 
    13381  VkDeviceMemory hMemory = allocation->GetMemory();
    13382 
    13383  /*
    13384  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13385  before vkFreeMemory.
    13386 
    13387  if(allocation->GetMappedData() != VMA_NULL)
    13388  {
    13389  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13390  }
    13391  */
    13392 
    13393  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13394 
    13395  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13396 }
    13397 
    13398 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13399 {
    13400  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13401  !hAllocation->CanBecomeLost() &&
    13402  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13403  {
    13404  void* pData = VMA_NULL;
    13405  VkResult res = Map(hAllocation, &pData);
    13406  if(res == VK_SUCCESS)
    13407  {
    13408  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13409  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13410  Unmap(hAllocation);
    13411  }
    13412  else
    13413  {
    13414  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13415  }
    13416  }
    13417 }
    13418 
    13419 #if VMA_STATS_STRING_ENABLED
    13420 
// Writes the detailed memory map as JSON: the "DedicatedAllocations",
// "DefaultPools", and "Pools" sections, each emitted only if non-empty.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Dedicated allocations, grouped per memory type.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the "DedicatedAllocations" object lazily, on first non-empty type.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default pools (one block vector per memory type), also emitted lazily.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Each pool is keyed by its numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13506 
    13507 #endif // #if VMA_STATS_STRING_ENABLED
    13508 
    13510 // Public interface
    13511 
// Public entry point: creates the allocator object with the user-provided
// allocation callbacks, then runs the second-phase Init() which may fail.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
    13521 
    13522 void vmaDestroyAllocator(
    13523  VmaAllocator allocator)
    13524 {
    13525  if(allocator != VK_NULL_HANDLE)
    13526  {
    13527  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13528  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13529  vma_delete(&allocationCallbacks, allocator);
    13530  }
    13531 }
    13532 
    13534  VmaAllocator allocator,
    13535  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13536 {
    13537  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13538  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13539 }
    13540 
    13542  VmaAllocator allocator,
    13543  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13544 {
    13545  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13546  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13547 }
    13548 
    13550  VmaAllocator allocator,
    13551  uint32_t memoryTypeIndex,
    13552  VkMemoryPropertyFlags* pFlags)
    13553 {
    13554  VMA_ASSERT(allocator && pFlags);
    13555  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13556  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13557 }
    13558 
    13560  VmaAllocator allocator,
    13561  uint32_t frameIndex)
    13562 {
    13563  VMA_ASSERT(allocator);
    13564  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13565 
    13566  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13567 
    13568  allocator->SetCurrentFrameIndex(frameIndex);
    13569 }
    13570 
// Public entry point: computes allocator-wide statistics into *pStats.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13579 
    13580 #if VMA_STATS_STRING_ENABLED
    13581 
// Public entry point: builds a JSON statistics string describing heaps,
// memory types and (optionally, when detailedMap is VK_TRUE) the full
// allocation map. The returned string must be released with vmaFreeStatsString.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Inner scope so the JSON writer is finished before the string is copied out.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        // Allocator-wide totals.
        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap-level stats only when the heap has at least one block.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nested objects for each memory type that belongs to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // List the memory type's property flags by name.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the built text into a heap buffer owned by the caller
    // (freed later via vmaFreeStatsString with length strlen + 1).
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13689 
    13690 void vmaFreeStatsString(
    13691  VmaAllocator allocator,
    13692  char* pStatsString)
    13693 {
    13694  if(pStatsString != VMA_NULL)
    13695  {
    13696  VMA_ASSERT(allocator);
    13697  size_t len = strlen(pStatsString);
    13698  vma_delete_array(allocator, pStatsString, len + 1);
    13699  }
    13700 }
    13701 
    13702 #endif // #if VMA_STATS_STRING_ENABLED
    13703 
    13704 /*
    13705 This function is not protected by any mutex because it just reads immutable data.
    13706 */
    13707 VkResult vmaFindMemoryTypeIndex(
    13708  VmaAllocator allocator,
    13709  uint32_t memoryTypeBits,
    13710  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13711  uint32_t* pMemoryTypeIndex)
    13712 {
    13713  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13714  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13715  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13716 
    13717  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13718  {
    13719  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13720  }
    13721 
    13722  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13723  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13724 
    13725  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13726  if(mapped)
    13727  {
    13728  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13729  }
    13730 
    13731  // Convert usage to requiredFlags and preferredFlags.
    13732  switch(pAllocationCreateInfo->usage)
    13733  {
    13735  break;
    13737  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13738  {
    13739  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13740  }
    13741  break;
    13743  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13744  break;
    13746  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13747  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13748  {
    13749  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13750  }
    13751  break;
    13753  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13754  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13755  break;
    13756  default:
    13757  break;
    13758  }
    13759 
    13760  *pMemoryTypeIndex = UINT32_MAX;
    13761  uint32_t minCost = UINT32_MAX;
    13762  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13763  memTypeIndex < allocator->GetMemoryTypeCount();
    13764  ++memTypeIndex, memTypeBit <<= 1)
    13765  {
    13766  // This memory type is acceptable according to memoryTypeBits bitmask.
    13767  if((memTypeBit & memoryTypeBits) != 0)
    13768  {
    13769  const VkMemoryPropertyFlags currFlags =
    13770  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13771  // This memory type contains requiredFlags.
    13772  if((requiredFlags & ~currFlags) == 0)
    13773  {
    13774  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13775  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13776  // Remember memory type with lowest cost.
    13777  if(currCost < minCost)
    13778  {
    13779  *pMemoryTypeIndex = memTypeIndex;
    13780  if(currCost == 0)
    13781  {
    13782  return VK_SUCCESS;
    13783  }
    13784  minCost = currCost;
    13785  }
    13786  }
    13787  }
    13788  }
    13789  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13790 }
    13791 
    13793  VmaAllocator allocator,
    13794  const VkBufferCreateInfo* pBufferCreateInfo,
    13795  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13796  uint32_t* pMemoryTypeIndex)
    13797 {
    13798  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13799  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13800  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13801  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13802 
    13803  const VkDevice hDev = allocator->m_hDevice;
    13804  VkBuffer hBuffer = VK_NULL_HANDLE;
    13805  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13806  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13807  if(res == VK_SUCCESS)
    13808  {
    13809  VkMemoryRequirements memReq = {};
    13810  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13811  hDev, hBuffer, &memReq);
    13812 
    13813  res = vmaFindMemoryTypeIndex(
    13814  allocator,
    13815  memReq.memoryTypeBits,
    13816  pAllocationCreateInfo,
    13817  pMemoryTypeIndex);
    13818 
    13819  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13820  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13821  }
    13822  return res;
    13823 }
    13824 
    13826  VmaAllocator allocator,
    13827  const VkImageCreateInfo* pImageCreateInfo,
    13828  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13829  uint32_t* pMemoryTypeIndex)
    13830 {
    13831  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13832  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13833  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13834  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13835 
    13836  const VkDevice hDev = allocator->m_hDevice;
    13837  VkImage hImage = VK_NULL_HANDLE;
    13838  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13839  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13840  if(res == VK_SUCCESS)
    13841  {
    13842  VkMemoryRequirements memReq = {};
    13843  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13844  hDev, hImage, &memReq);
    13845 
    13846  res = vmaFindMemoryTypeIndex(
    13847  allocator,
    13848  memReq.memoryTypeBits,
    13849  pAllocationCreateInfo,
    13850  pMemoryTypeIndex);
    13851 
    13852  allocator->GetVulkanFunctions().vkDestroyImage(
    13853  hDev, hImage, allocator->GetAllocationCallbacks());
    13854  }
    13855  return res;
    13856 }
    13857 
// Public entry point: creates a custom pool and, when recording is enabled,
// records the call for later replay.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
    13880 
// Public entry point: destroys a custom pool. Passing VK_NULL_HANDLE is a
// valid no-op. When recording is enabled, the call is recorded before the
// pool is actually destroyed.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
    13905 
    13906 void vmaGetPoolStats(
    13907  VmaAllocator allocator,
    13908  VmaPool pool,
    13909  VmaPoolStats* pPoolStats)
    13910 {
    13911  VMA_ASSERT(allocator && pool && pPoolStats);
    13912 
    13913  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13914 
    13915  allocator->GetPoolStats(pool, pPoolStats);
    13916 }
    13917 
    13919  VmaAllocator allocator,
    13920  VmaPool pool,
    13921  size_t* pLostAllocationCount)
    13922 {
    13923  VMA_ASSERT(allocator && pool);
    13924 
    13925  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13926 
    13927 #if VMA_RECORDING_ENABLED
    13928  if(allocator->GetRecorder() != VMA_NULL)
    13929  {
    13930  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13931  }
    13932 #endif
    13933 
    13934  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13935 }
    13936 
    13937 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13938 {
    13939  VMA_ASSERT(allocator && pool);
    13940 
    13941  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13942 
    13943  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13944 
    13945  return allocator->CheckPoolCorruption(pool);
    13946 }
    13947 
    13948 VkResult vmaAllocateMemory(
    13949  VmaAllocator allocator,
    13950  const VkMemoryRequirements* pVkMemoryRequirements,
    13951  const VmaAllocationCreateInfo* pCreateInfo,
    13952  VmaAllocation* pAllocation,
    13953  VmaAllocationInfo* pAllocationInfo)
    13954 {
    13955  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13956 
    13957  VMA_DEBUG_LOG("vmaAllocateMemory");
    13958 
    13959  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13960 
    13961  VkResult result = allocator->AllocateMemory(
    13962  *pVkMemoryRequirements,
    13963  false, // requiresDedicatedAllocation
    13964  false, // prefersDedicatedAllocation
    13965  VK_NULL_HANDLE, // dedicatedBuffer
    13966  VK_NULL_HANDLE, // dedicatedImage
    13967  *pCreateInfo,
    13968  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13969  pAllocation);
    13970 
    13971 #if VMA_RECORDING_ENABLED
    13972  if(allocator->GetRecorder() != VMA_NULL)
    13973  {
    13974  allocator->GetRecorder()->RecordAllocateMemory(
    13975  allocator->GetCurrentFrameIndex(),
    13976  *pVkMemoryRequirements,
    13977  *pCreateInfo,
    13978  *pAllocation);
    13979  }
    13980 #endif
    13981 
    13982  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13983  {
    13984  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13985  }
    13986 
    13987  return result;
    13988 }
    13989 
    13991  VmaAllocator allocator,
    13992  VkBuffer buffer,
    13993  const VmaAllocationCreateInfo* pCreateInfo,
    13994  VmaAllocation* pAllocation,
    13995  VmaAllocationInfo* pAllocationInfo)
    13996 {
    13997  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13998 
    13999  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    14000 
    14001  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14002 
    14003  VkMemoryRequirements vkMemReq = {};
    14004  bool requiresDedicatedAllocation = false;
    14005  bool prefersDedicatedAllocation = false;
    14006  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    14007  requiresDedicatedAllocation,
    14008  prefersDedicatedAllocation);
    14009 
    14010  VkResult result = allocator->AllocateMemory(
    14011  vkMemReq,
    14012  requiresDedicatedAllocation,
    14013  prefersDedicatedAllocation,
    14014  buffer, // dedicatedBuffer
    14015  VK_NULL_HANDLE, // dedicatedImage
    14016  *pCreateInfo,
    14017  VMA_SUBALLOCATION_TYPE_BUFFER,
    14018  pAllocation);
    14019 
    14020 #if VMA_RECORDING_ENABLED
    14021  if(allocator->GetRecorder() != VMA_NULL)
    14022  {
    14023  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    14024  allocator->GetCurrentFrameIndex(),
    14025  vkMemReq,
    14026  requiresDedicatedAllocation,
    14027  prefersDedicatedAllocation,
    14028  *pCreateInfo,
    14029  *pAllocation);
    14030  }
    14031 #endif
    14032 
    14033  if(pAllocationInfo && result == VK_SUCCESS)
    14034  {
    14035  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14036  }
    14037 
    14038  return result;
    14039 }
    14040 
    14041 VkResult vmaAllocateMemoryForImage(
    14042  VmaAllocator allocator,
    14043  VkImage image,
    14044  const VmaAllocationCreateInfo* pCreateInfo,
    14045  VmaAllocation* pAllocation,
    14046  VmaAllocationInfo* pAllocationInfo)
    14047 {
    14048  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    14049 
    14050  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    14051 
    14052  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14053 
    14054  VkMemoryRequirements vkMemReq = {};
    14055  bool requiresDedicatedAllocation = false;
    14056  bool prefersDedicatedAllocation = false;
    14057  allocator->GetImageMemoryRequirements(image, vkMemReq,
    14058  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14059 
    14060  VkResult result = allocator->AllocateMemory(
    14061  vkMemReq,
    14062  requiresDedicatedAllocation,
    14063  prefersDedicatedAllocation,
    14064  VK_NULL_HANDLE, // dedicatedBuffer
    14065  image, // dedicatedImage
    14066  *pCreateInfo,
    14067  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    14068  pAllocation);
    14069 
    14070 #if VMA_RECORDING_ENABLED
    14071  if(allocator->GetRecorder() != VMA_NULL)
    14072  {
    14073  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    14074  allocator->GetCurrentFrameIndex(),
    14075  vkMemReq,
    14076  requiresDedicatedAllocation,
    14077  prefersDedicatedAllocation,
    14078  *pCreateInfo,
    14079  *pAllocation);
    14080  }
    14081 #endif
    14082 
    14083  if(pAllocationInfo && result == VK_SUCCESS)
    14084  {
    14085  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14086  }
    14087 
    14088  return result;
    14089 }
    14090 
    14091 void vmaFreeMemory(
    14092  VmaAllocator allocator,
    14093  VmaAllocation allocation)
    14094 {
    14095  VMA_ASSERT(allocator);
    14096 
    14097  if(allocation == VK_NULL_HANDLE)
    14098  {
    14099  return;
    14100  }
    14101 
    14102  VMA_DEBUG_LOG("vmaFreeMemory");
    14103 
    14104  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14105 
    14106 #if VMA_RECORDING_ENABLED
    14107  if(allocator->GetRecorder() != VMA_NULL)
    14108  {
    14109  allocator->GetRecorder()->RecordFreeMemory(
    14110  allocator->GetCurrentFrameIndex(),
    14111  allocation);
    14112  }
    14113 #endif
    14114 
    14115  allocator->FreeMemory(allocation);
    14116 }
    14117 
    14118 VkResult vmaResizeAllocation(
    14119  VmaAllocator allocator,
    14120  VmaAllocation allocation,
    14121  VkDeviceSize newSize)
    14122 {
    14123  VMA_ASSERT(allocator && allocation);
    14124 
    14125  VMA_DEBUG_LOG("vmaResizeAllocation");
    14126 
    14127  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14128 
    14129 #if VMA_RECORDING_ENABLED
    14130  if(allocator->GetRecorder() != VMA_NULL)
    14131  {
    14132  allocator->GetRecorder()->RecordResizeAllocation(
    14133  allocator->GetCurrentFrameIndex(),
    14134  allocation,
    14135  newSize);
    14136  }
    14137 #endif
    14138 
    14139  return allocator->ResizeAllocation(allocation, newSize);
    14140 }
    14141 
    14143  VmaAllocator allocator,
    14144  VmaAllocation allocation,
    14145  VmaAllocationInfo* pAllocationInfo)
    14146 {
    14147  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    14148 
    14149  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14150 
    14151 #if VMA_RECORDING_ENABLED
    14152  if(allocator->GetRecorder() != VMA_NULL)
    14153  {
    14154  allocator->GetRecorder()->RecordGetAllocationInfo(
    14155  allocator->GetCurrentFrameIndex(),
    14156  allocation);
    14157  }
    14158 #endif
    14159 
    14160  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    14161 }
    14162 
    14163 VkBool32 vmaTouchAllocation(
    14164  VmaAllocator allocator,
    14165  VmaAllocation allocation)
    14166 {
    14167  VMA_ASSERT(allocator && allocation);
    14168 
    14169  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14170 
    14171 #if VMA_RECORDING_ENABLED
    14172  if(allocator->GetRecorder() != VMA_NULL)
    14173  {
    14174  allocator->GetRecorder()->RecordTouchAllocation(
    14175  allocator->GetCurrentFrameIndex(),
    14176  allocation);
    14177  }
    14178 #endif
    14179 
    14180  return allocator->TouchAllocation(allocation);
    14181 }
    14182 
    14184  VmaAllocator allocator,
    14185  VmaAllocation allocation,
    14186  void* pUserData)
    14187 {
    14188  VMA_ASSERT(allocator && allocation);
    14189 
    14190  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14191 
    14192  allocation->SetUserData(allocator, pUserData);
    14193 
    14194 #if VMA_RECORDING_ENABLED
    14195  if(allocator->GetRecorder() != VMA_NULL)
    14196  {
    14197  allocator->GetRecorder()->RecordSetAllocationUserData(
    14198  allocator->GetCurrentFrameIndex(),
    14199  allocation,
    14200  pUserData);
    14201  }
    14202 #endif
    14203 }
    14204 
    14206  VmaAllocator allocator,
    14207  VmaAllocation* pAllocation)
    14208 {
    14209  VMA_ASSERT(allocator && pAllocation);
    14210 
    14211  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    14212 
    14213  allocator->CreateLostAllocation(pAllocation);
    14214 
    14215 #if VMA_RECORDING_ENABLED
    14216  if(allocator->GetRecorder() != VMA_NULL)
    14217  {
    14218  allocator->GetRecorder()->RecordCreateLostAllocation(
    14219  allocator->GetCurrentFrameIndex(),
    14220  *pAllocation);
    14221  }
    14222 #endif
    14223 }
    14224 
    14225 VkResult vmaMapMemory(
    14226  VmaAllocator allocator,
    14227  VmaAllocation allocation,
    14228  void** ppData)
    14229 {
    14230  VMA_ASSERT(allocator && allocation && ppData);
    14231 
    14232  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14233 
    14234  VkResult res = allocator->Map(allocation, ppData);
    14235 
    14236 #if VMA_RECORDING_ENABLED
    14237  if(allocator->GetRecorder() != VMA_NULL)
    14238  {
    14239  allocator->GetRecorder()->RecordMapMemory(
    14240  allocator->GetCurrentFrameIndex(),
    14241  allocation);
    14242  }
    14243 #endif
    14244 
    14245  return res;
    14246 }
    14247 
    14248 void vmaUnmapMemory(
    14249  VmaAllocator allocator,
    14250  VmaAllocation allocation)
    14251 {
    14252  VMA_ASSERT(allocator && allocation);
    14253 
    14254  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14255 
    14256 #if VMA_RECORDING_ENABLED
    14257  if(allocator->GetRecorder() != VMA_NULL)
    14258  {
    14259  allocator->GetRecorder()->RecordUnmapMemory(
    14260  allocator->GetCurrentFrameIndex(),
    14261  allocation);
    14262  }
    14263 #endif
    14264 
    14265  allocator->Unmap(allocation);
    14266 }
    14267 
    14268 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14269 {
    14270  VMA_ASSERT(allocator && allocation);
    14271 
    14272  VMA_DEBUG_LOG("vmaFlushAllocation");
    14273 
    14274  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14275 
    14276  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14277 
    14278 #if VMA_RECORDING_ENABLED
    14279  if(allocator->GetRecorder() != VMA_NULL)
    14280  {
    14281  allocator->GetRecorder()->RecordFlushAllocation(
    14282  allocator->GetCurrentFrameIndex(),
    14283  allocation, offset, size);
    14284  }
    14285 #endif
    14286 }
    14287 
    14288 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14289 {
    14290  VMA_ASSERT(allocator && allocation);
    14291 
    14292  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14293 
    14294  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14295 
    14296  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14297 
    14298 #if VMA_RECORDING_ENABLED
    14299  if(allocator->GetRecorder() != VMA_NULL)
    14300  {
    14301  allocator->GetRecorder()->RecordInvalidateAllocation(
    14302  allocator->GetCurrentFrameIndex(),
    14303  allocation, offset, size);
    14304  }
    14305 #endif
    14306 }
    14307 
    14308 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14309 {
    14310  VMA_ASSERT(allocator);
    14311 
    14312  VMA_DEBUG_LOG("vmaCheckCorruption");
    14313 
    14314  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14315 
    14316  return allocator->CheckCorruption(memoryTypeBits);
    14317 }
    14318 
    14319 VkResult vmaDefragment(
    14320  VmaAllocator allocator,
    14321  VmaAllocation* pAllocations,
    14322  size_t allocationCount,
    14323  VkBool32* pAllocationsChanged,
    14324  const VmaDefragmentationInfo *pDefragmentationInfo,
    14325  VmaDefragmentationStats* pDefragmentationStats)
    14326 {
    14327  VMA_ASSERT(allocator && pAllocations);
    14328 
    14329  VMA_DEBUG_LOG("vmaDefragment");
    14330 
    14331  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14332 
    14333  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14334 }
    14335 
    14336 VkResult vmaBindBufferMemory(
    14337  VmaAllocator allocator,
    14338  VmaAllocation allocation,
    14339  VkBuffer buffer)
    14340 {
    14341  VMA_ASSERT(allocator && allocation && buffer);
    14342 
    14343  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14344 
    14345  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14346 
    14347  return allocator->BindBufferMemory(allocation, buffer);
    14348 }
    14349 
    14350 VkResult vmaBindImageMemory(
    14351  VmaAllocator allocator,
    14352  VmaAllocation allocation,
    14353  VkImage image)
    14354 {
    14355  VMA_ASSERT(allocator && allocation && image);
    14356 
    14357  VMA_DEBUG_LOG("vmaBindImageMemory");
    14358 
    14359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14360 
    14361  return allocator->BindImageMemory(allocation, image);
    14362 }
    14363 
    14364 VkResult vmaCreateBuffer(
    14365  VmaAllocator allocator,
    14366  const VkBufferCreateInfo* pBufferCreateInfo,
    14367  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14368  VkBuffer* pBuffer,
    14369  VmaAllocation* pAllocation,
    14370  VmaAllocationInfo* pAllocationInfo)
    14371 {
    14372  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14373 
    14374  if(pBufferCreateInfo->size == 0)
    14375  {
    14376  return VK_ERROR_VALIDATION_FAILED_EXT;
    14377  }
    14378 
    14379  VMA_DEBUG_LOG("vmaCreateBuffer");
    14380 
    14381  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14382 
    14383  *pBuffer = VK_NULL_HANDLE;
    14384  *pAllocation = VK_NULL_HANDLE;
    14385 
    14386  // 1. Create VkBuffer.
    14387  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14388  allocator->m_hDevice,
    14389  pBufferCreateInfo,
    14390  allocator->GetAllocationCallbacks(),
    14391  pBuffer);
    14392  if(res >= 0)
    14393  {
    14394  // 2. vkGetBufferMemoryRequirements.
    14395  VkMemoryRequirements vkMemReq = {};
    14396  bool requiresDedicatedAllocation = false;
    14397  bool prefersDedicatedAllocation = false;
    14398  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14399  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14400 
    14401  // Make sure alignment requirements for specific buffer usages reported
    14402  // in Physical Device Properties are included in alignment reported by memory requirements.
    14403  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14404  {
    14405  VMA_ASSERT(vkMemReq.alignment %
    14406  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14407  }
    14408  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14409  {
    14410  VMA_ASSERT(vkMemReq.alignment %
    14411  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14412  }
    14413  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14414  {
    14415  VMA_ASSERT(vkMemReq.alignment %
    14416  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14417  }
    14418 
    14419  // 3. Allocate memory using allocator.
    14420  res = allocator->AllocateMemory(
    14421  vkMemReq,
    14422  requiresDedicatedAllocation,
    14423  prefersDedicatedAllocation,
    14424  *pBuffer, // dedicatedBuffer
    14425  VK_NULL_HANDLE, // dedicatedImage
    14426  *pAllocationCreateInfo,
    14427  VMA_SUBALLOCATION_TYPE_BUFFER,
    14428  pAllocation);
    14429 
    14430 #if VMA_RECORDING_ENABLED
    14431  if(allocator->GetRecorder() != VMA_NULL)
    14432  {
    14433  allocator->GetRecorder()->RecordCreateBuffer(
    14434  allocator->GetCurrentFrameIndex(),
    14435  *pBufferCreateInfo,
    14436  *pAllocationCreateInfo,
    14437  *pAllocation);
    14438  }
    14439 #endif
    14440 
    14441  if(res >= 0)
    14442  {
    14443  // 3. Bind buffer with memory.
    14444  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14445  if(res >= 0)
    14446  {
    14447  // All steps succeeded.
    14448  #if VMA_STATS_STRING_ENABLED
    14449  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14450  #endif
    14451  if(pAllocationInfo != VMA_NULL)
    14452  {
    14453  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14454  }
    14455 
    14456  return VK_SUCCESS;
    14457  }
    14458  allocator->FreeMemory(*pAllocation);
    14459  *pAllocation = VK_NULL_HANDLE;
    14460  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14461  *pBuffer = VK_NULL_HANDLE;
    14462  return res;
    14463  }
    14464  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14465  *pBuffer = VK_NULL_HANDLE;
    14466  return res;
    14467  }
    14468  return res;
    14469 }
    14470 
    14471 void vmaDestroyBuffer(
    14472  VmaAllocator allocator,
    14473  VkBuffer buffer,
    14474  VmaAllocation allocation)
    14475 {
    14476  VMA_ASSERT(allocator);
    14477 
    14478  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14479  {
    14480  return;
    14481  }
    14482 
    14483  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14484 
    14485  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14486 
    14487 #if VMA_RECORDING_ENABLED
    14488  if(allocator->GetRecorder() != VMA_NULL)
    14489  {
    14490  allocator->GetRecorder()->RecordDestroyBuffer(
    14491  allocator->GetCurrentFrameIndex(),
    14492  allocation);
    14493  }
    14494 #endif
    14495 
    14496  if(buffer != VK_NULL_HANDLE)
    14497  {
    14498  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14499  }
    14500 
    14501  if(allocation != VK_NULL_HANDLE)
    14502  {
    14503  allocator->FreeMemory(allocation);
    14504  }
    14505 }
    14506 
    14507 VkResult vmaCreateImage(
    14508  VmaAllocator allocator,
    14509  const VkImageCreateInfo* pImageCreateInfo,
    14510  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14511  VkImage* pImage,
    14512  VmaAllocation* pAllocation,
    14513  VmaAllocationInfo* pAllocationInfo)
    14514 {
    14515  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14516 
    14517  if(pImageCreateInfo->extent.width == 0 ||
    14518  pImageCreateInfo->extent.height == 0 ||
    14519  pImageCreateInfo->extent.depth == 0 ||
    14520  pImageCreateInfo->mipLevels == 0 ||
    14521  pImageCreateInfo->arrayLayers == 0)
    14522  {
    14523  return VK_ERROR_VALIDATION_FAILED_EXT;
    14524  }
    14525 
    14526  VMA_DEBUG_LOG("vmaCreateImage");
    14527 
    14528  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14529 
    14530  *pImage = VK_NULL_HANDLE;
    14531  *pAllocation = VK_NULL_HANDLE;
    14532 
    14533  // 1. Create VkImage.
    14534  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14535  allocator->m_hDevice,
    14536  pImageCreateInfo,
    14537  allocator->GetAllocationCallbacks(),
    14538  pImage);
    14539  if(res >= 0)
    14540  {
    14541  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14542  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14543  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14544 
    14545  // 2. Allocate memory using allocator.
    14546  VkMemoryRequirements vkMemReq = {};
    14547  bool requiresDedicatedAllocation = false;
    14548  bool prefersDedicatedAllocation = false;
    14549  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14550  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14551 
    14552  res = allocator->AllocateMemory(
    14553  vkMemReq,
    14554  requiresDedicatedAllocation,
    14555  prefersDedicatedAllocation,
    14556  VK_NULL_HANDLE, // dedicatedBuffer
    14557  *pImage, // dedicatedImage
    14558  *pAllocationCreateInfo,
    14559  suballocType,
    14560  pAllocation);
    14561 
    14562 #if VMA_RECORDING_ENABLED
    14563  if(allocator->GetRecorder() != VMA_NULL)
    14564  {
    14565  allocator->GetRecorder()->RecordCreateImage(
    14566  allocator->GetCurrentFrameIndex(),
    14567  *pImageCreateInfo,
    14568  *pAllocationCreateInfo,
    14569  *pAllocation);
    14570  }
    14571 #endif
    14572 
    14573  if(res >= 0)
    14574  {
    14575  // 3. Bind image with memory.
    14576  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14577  if(res >= 0)
    14578  {
    14579  // All steps succeeded.
    14580  #if VMA_STATS_STRING_ENABLED
    14581  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14582  #endif
    14583  if(pAllocationInfo != VMA_NULL)
    14584  {
    14585  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14586  }
    14587 
    14588  return VK_SUCCESS;
    14589  }
    14590  allocator->FreeMemory(*pAllocation);
    14591  *pAllocation = VK_NULL_HANDLE;
    14592  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14593  *pImage = VK_NULL_HANDLE;
    14594  return res;
    14595  }
    14596  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14597  *pImage = VK_NULL_HANDLE;
    14598  return res;
    14599  }
    14600  return res;
    14601 }
    14602 
    14603 void vmaDestroyImage(
    14604  VmaAllocator allocator,
    14605  VkImage image,
    14606  VmaAllocation allocation)
    14607 {
    14608  VMA_ASSERT(allocator);
    14609 
    14610  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14611  {
    14612  return;
    14613  }
    14614 
    14615  VMA_DEBUG_LOG("vmaDestroyImage");
    14616 
    14617  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14618 
    14619 #if VMA_RECORDING_ENABLED
    14620  if(allocator->GetRecorder() != VMA_NULL)
    14621  {
    14622  allocator->GetRecorder()->RecordDestroyImage(
    14623  allocator->GetCurrentFrameIndex(),
    14624  allocation);
    14625  }
    14626 #endif
    14627 
    14628  if(image != VK_NULL_HANDLE)
    14629  {
    14630  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14631  }
    14632  if(allocation != VK_NULL_HANDLE)
    14633  {
    14634  allocator->FreeMemory(allocation);
    14635  }
    14636 }
    14637 
    14638 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1586
    -
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1887
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1479 /*
    1480 Define this macro to 0/1 to disable/enable support for recording functionality,
    1481 available through VmaAllocatorCreateInfo::pRecordSettings.
    1482 */
    1483 #ifndef VMA_RECORDING_ENABLED
    1484  #ifdef _WIN32
    1485  #define VMA_RECORDING_ENABLED 1
    1486  #else
    1487  #define VMA_RECORDING_ENABLED 0
    1488  #endif
    1489 #endif
    1490 
    1491 #ifndef NOMINMAX
    1492  #define NOMINMAX // For windows.h
    1493 #endif
    1494 
    1495 #include <vulkan/vulkan.h>
    1496 
    1497 #if VMA_RECORDING_ENABLED
    1498  #include <windows.h>
    1499 #endif
    1500 
    1501 #if !defined(VMA_DEDICATED_ALLOCATION)
    1502  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1503  #define VMA_DEDICATED_ALLOCATION 1
    1504  #else
    1505  #define VMA_DEDICATED_ALLOCATION 0
    1506  #endif
    1507 #endif
    1508 
    1518 VK_DEFINE_HANDLE(VmaAllocator)
    1519 
    1520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1522  VmaAllocator allocator,
    1523  uint32_t memoryType,
    1524  VkDeviceMemory memory,
    1525  VkDeviceSize size);
    1527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1528  VmaAllocator allocator,
    1529  uint32_t memoryType,
    1530  VkDeviceMemory memory,
    1531  VkDeviceSize size);
    1532 
    1546 
    1576 
    1579 typedef VkFlags VmaAllocatorCreateFlags;
    1580 
    1585 typedef struct VmaVulkanFunctions {
    1586  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1587  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1588  PFN_vkAllocateMemory vkAllocateMemory;
    1589  PFN_vkFreeMemory vkFreeMemory;
    1590  PFN_vkMapMemory vkMapMemory;
    1591  PFN_vkUnmapMemory vkUnmapMemory;
    1592  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1593  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1594  PFN_vkBindBufferMemory vkBindBufferMemory;
    1595  PFN_vkBindImageMemory vkBindImageMemory;
    1596  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1597  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1598  PFN_vkCreateBuffer vkCreateBuffer;
    1599  PFN_vkDestroyBuffer vkDestroyBuffer;
    1600  PFN_vkCreateImage vkCreateImage;
    1601  PFN_vkDestroyImage vkDestroyImage;
    1602 #if VMA_DEDICATED_ALLOCATION
    1603  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1604  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1605 #endif
    1607 
    1609 typedef enum VmaRecordFlagBits {
    1616 
    1619 typedef VkFlags VmaRecordFlags;
    1620 
    1622 typedef struct VmaRecordSettings
    1623 {
    1633  const char* pFilePath;
    1635 
    1638 {
    1642 
    1643  VkPhysicalDevice physicalDevice;
    1645 
    1646  VkDevice device;
    1648 
    1651 
    1652  const VkAllocationCallbacks* pAllocationCallbacks;
    1654 
    1694  const VkDeviceSize* pHeapSizeLimit;
    1715 
    1717 VkResult vmaCreateAllocator(
    1718  const VmaAllocatorCreateInfo* pCreateInfo,
    1719  VmaAllocator* pAllocator);
    1720 
    1722 void vmaDestroyAllocator(
    1723  VmaAllocator allocator);
    1724 
    1730  VmaAllocator allocator,
    1731  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1732 
    1738  VmaAllocator allocator,
    1739  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1740 
    1748  VmaAllocator allocator,
    1749  uint32_t memoryTypeIndex,
    1750  VkMemoryPropertyFlags* pFlags);
    1751 
    1761  VmaAllocator allocator,
    1762  uint32_t frameIndex);
    1763 
    1766 typedef struct VmaStatInfo
    1767 {
    1769  uint32_t blockCount;
    1775  VkDeviceSize usedBytes;
    1777  VkDeviceSize unusedBytes;
    1780 } VmaStatInfo;
    1781 
    1783 typedef struct VmaStats
    1784 {
    1785  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1786  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1788 } VmaStats;
    1789 
    1791 void vmaCalculateStats(
    1792  VmaAllocator allocator,
    1793  VmaStats* pStats);
    1794 
    1795 #define VMA_STATS_STRING_ENABLED 1
    1796 
    1797 #if VMA_STATS_STRING_ENABLED
    1798 
    1800 
    1802 void vmaBuildStatsString(
    1803  VmaAllocator allocator,
    1804  char** ppStatsString,
    1805  VkBool32 detailedMap);
    1806 
    1807 void vmaFreeStatsString(
    1808  VmaAllocator allocator,
    1809  char* pStatsString);
    1810 
    1811 #endif // #if VMA_STATS_STRING_ENABLED
    1812 
    1821 VK_DEFINE_HANDLE(VmaPool)
    1822 
    1823 typedef enum VmaMemoryUsage
    1824 {
    1873 } VmaMemoryUsage;
    1874 
    1889 
    1944 
    1960 
    1970 
    1977 
    1981 
    1983 {
    1996  VkMemoryPropertyFlags requiredFlags;
    2001  VkMemoryPropertyFlags preferredFlags;
    2009  uint32_t memoryTypeBits;
    2022  void* pUserData;
    2024 
    2041 VkResult vmaFindMemoryTypeIndex(
    2042  VmaAllocator allocator,
    2043  uint32_t memoryTypeBits,
    2044  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2045  uint32_t* pMemoryTypeIndex);
    2046 
    2060  VmaAllocator allocator,
    2061  const VkBufferCreateInfo* pBufferCreateInfo,
    2062  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2063  uint32_t* pMemoryTypeIndex);
    2064 
    2078  VmaAllocator allocator,
    2079  const VkImageCreateInfo* pImageCreateInfo,
    2080  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2081  uint32_t* pMemoryTypeIndex);
    2082 
    2103 
    2120 
    2131 
    2137 
    2140 typedef VkFlags VmaPoolCreateFlags;
    2141 
    2144 typedef struct VmaPoolCreateInfo {
    2159  VkDeviceSize blockSize;
    2188 
    2191 typedef struct VmaPoolStats {
    2194  VkDeviceSize size;
    2197  VkDeviceSize unusedSize;
    2210  VkDeviceSize unusedRangeSizeMax;
    2213  size_t blockCount;
    2214 } VmaPoolStats;
    2215 
    2222 VkResult vmaCreatePool(
    2223  VmaAllocator allocator,
    2224  const VmaPoolCreateInfo* pCreateInfo,
    2225  VmaPool* pPool);
    2226 
    2229 void vmaDestroyPool(
    2230  VmaAllocator allocator,
    2231  VmaPool pool);
    2232 
    2239 void vmaGetPoolStats(
    2240  VmaAllocator allocator,
    2241  VmaPool pool,
    2242  VmaPoolStats* pPoolStats);
    2243 
    2251  VmaAllocator allocator,
    2252  VmaPool pool,
    2253  size_t* pLostAllocationCount);
    2254 
    2269 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2270 
    2295 VK_DEFINE_HANDLE(VmaAllocation)
    2296 
    2297 
    2299 typedef struct VmaAllocationInfo {
    2304  uint32_t memoryType;
    2313  VkDeviceMemory deviceMemory;
    2318  VkDeviceSize offset;
    2323  VkDeviceSize size;
    2337  void* pUserData;
    2339 
    2350 VkResult vmaAllocateMemory(
    2351  VmaAllocator allocator,
    2352  const VkMemoryRequirements* pVkMemoryRequirements,
    2353  const VmaAllocationCreateInfo* pCreateInfo,
    2354  VmaAllocation* pAllocation,
    2355  VmaAllocationInfo* pAllocationInfo);
    2356 
    2364  VmaAllocator allocator,
    2365  VkBuffer buffer,
    2366  const VmaAllocationCreateInfo* pCreateInfo,
    2367  VmaAllocation* pAllocation,
    2368  VmaAllocationInfo* pAllocationInfo);
    2369 
    2371 VkResult vmaAllocateMemoryForImage(
    2372  VmaAllocator allocator,
    2373  VkImage image,
    2374  const VmaAllocationCreateInfo* pCreateInfo,
    2375  VmaAllocation* pAllocation,
    2376  VmaAllocationInfo* pAllocationInfo);
    2377 
    2379 void vmaFreeMemory(
    2380  VmaAllocator allocator,
    2381  VmaAllocation allocation);
    2382 
    2403 VkResult vmaResizeAllocation(
    2404  VmaAllocator allocator,
    2405  VmaAllocation allocation,
    2406  VkDeviceSize newSize);
    2407 
    2425  VmaAllocator allocator,
    2426  VmaAllocation allocation,
    2427  VmaAllocationInfo* pAllocationInfo);
    2428 
    2443 VkBool32 vmaTouchAllocation(
    2444  VmaAllocator allocator,
    2445  VmaAllocation allocation);
    2446 
    2461  VmaAllocator allocator,
    2462  VmaAllocation allocation,
    2463  void* pUserData);
    2464 
    2476  VmaAllocator allocator,
    2477  VmaAllocation* pAllocation);
    2478 
    2513 VkResult vmaMapMemory(
    2514  VmaAllocator allocator,
    2515  VmaAllocation allocation,
    2516  void** ppData);
    2517 
    2522 void vmaUnmapMemory(
    2523  VmaAllocator allocator,
    2524  VmaAllocation allocation);
    2525 
    2538 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2539 
    2552 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2553 
    2570 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2571 
    2573 typedef struct VmaDefragmentationInfo {
    2578  VkDeviceSize maxBytesToMove;
    2585 
    2587 typedef struct VmaDefragmentationStats {
    2589  VkDeviceSize bytesMoved;
    2591  VkDeviceSize bytesFreed;
    2597 
    2636 VkResult vmaDefragment(
    2637  VmaAllocator allocator,
    2638  VmaAllocation* pAllocations,
    2639  size_t allocationCount,
    2640  VkBool32* pAllocationsChanged,
    2641  const VmaDefragmentationInfo *pDefragmentationInfo,
    2642  VmaDefragmentationStats* pDefragmentationStats);
    2643 
    2656 VkResult vmaBindBufferMemory(
    2657  VmaAllocator allocator,
    2658  VmaAllocation allocation,
    2659  VkBuffer buffer);
    2660 
    2673 VkResult vmaBindImageMemory(
    2674  VmaAllocator allocator,
    2675  VmaAllocation allocation,
    2676  VkImage image);
    2677 
    2704 VkResult vmaCreateBuffer(
    2705  VmaAllocator allocator,
    2706  const VkBufferCreateInfo* pBufferCreateInfo,
    2707  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2708  VkBuffer* pBuffer,
    2709  VmaAllocation* pAllocation,
    2710  VmaAllocationInfo* pAllocationInfo);
    2711 
    2723 void vmaDestroyBuffer(
    2724  VmaAllocator allocator,
    2725  VkBuffer buffer,
    2726  VmaAllocation allocation);
    2727 
    2729 VkResult vmaCreateImage(
    2730  VmaAllocator allocator,
    2731  const VkImageCreateInfo* pImageCreateInfo,
    2732  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2733  VkImage* pImage,
    2734  VmaAllocation* pAllocation,
    2735  VmaAllocationInfo* pAllocationInfo);
    2736 
    2748 void vmaDestroyImage(
    2749  VmaAllocator allocator,
    2750  VkImage image,
    2751  VmaAllocation allocation);
    2752 
    2753 #ifdef __cplusplus
    2754 }
    2755 #endif
    2756 
    2757 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2758 
    2759 // For Visual Studio IntelliSense.
    2760 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2761 #define VMA_IMPLEMENTATION
    2762 #endif
    2763 
    2764 #ifdef VMA_IMPLEMENTATION
    2765 #undef VMA_IMPLEMENTATION
    2766 
    2767 #include <cstdint>
    2768 #include <cstdlib>
    2769 #include <cstring>
    2770 
    2771 /*******************************************************************************
    2772 CONFIGURATION SECTION
    2773 
    2774 Define some of these macros before each #include of this header or change them
here if you need other than default behavior depending on your environment.
    2776 */
    2777 
    2778 /*
    2779 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2780 internally, like:
    2781 
    2782  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2783 
    2784 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2785 VmaAllocatorCreateInfo::pVulkanFunctions.
    2786 */
    2787 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2788 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2789 #endif
    2790 
    2791 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2792 //#define VMA_USE_STL_CONTAINERS 1
    2793 
    2794 /* Set this macro to 1 to make the library including and using STL containers:
    2795 std::pair, std::vector, std::list, std::unordered_map.
    2796 
    2797 Set it to 0 or undefined to make the library using its own implementation of
    2798 the containers.
    2799 */
    2800 #if VMA_USE_STL_CONTAINERS
    2801  #define VMA_USE_STL_VECTOR 1
    2802  #define VMA_USE_STL_UNORDERED_MAP 1
    2803  #define VMA_USE_STL_LIST 1
    2804 #endif
    2805 
    2806 #if VMA_USE_STL_VECTOR
    2807  #include <vector>
    2808 #endif
    2809 
    2810 #if VMA_USE_STL_UNORDERED_MAP
    2811  #include <unordered_map>
    2812 #endif
    2813 
    2814 #if VMA_USE_STL_LIST
    2815  #include <list>
    2816 #endif
    2817 
    2818 /*
    2819 Following headers are used in this CONFIGURATION section only, so feel free to
    2820 remove them if not needed.
    2821 */
    2822 #include <cassert> // for assert
    2823 #include <algorithm> // for min, max
    2824 #include <mutex> // for std::mutex
    2825 #include <atomic> // for std::atomic
    2826 
    2827 #ifndef VMA_NULL
    2828  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2829  #define VMA_NULL nullptr
    2830 #endif
    2831 
    2832 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    2833 #include <cstdlib>
// Android before API level 16 has no aligned_alloc(); emulate it with memalign().
// Note: memory returned by memalign() is released with regular free().
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
    2844 #elif defined(__APPLE__) || defined(__ANDROID__)
    2845 #include <cstdlib>
// Apple and Android platforms lack C11 aligned_alloc(); emulate it with
// posix_memalign(). Returns VMA_NULL on failure.
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
    2859 #endif
    2860 
    2861 // If your compiler is not compatible with C++11 and definition of
// aligned_alloc() function is missing, uncommenting the following line may help:
    2863 
    2864 //#include <malloc.h>
    2865 
    2866 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2867 #ifndef VMA_ASSERT
    2868  #ifdef _DEBUG
    2869  #define VMA_ASSERT(expr) assert(expr)
    2870  #else
    2871  #define VMA_ASSERT(expr)
    2872  #endif
    2873 #endif
    2874 
    2875 // Assert that will be called very often, like inside data structures e.g. operator[].
    2876 // Making it non-empty can make program slow.
    2877 #ifndef VMA_HEAVY_ASSERT
    2878  #ifdef _DEBUG
    2879  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2880  #else
    2881  #define VMA_HEAVY_ASSERT(expr)
    2882  #endif
    2883 #endif
    2884 
    2885 #ifndef VMA_ALIGN_OF
    2886  #define VMA_ALIGN_OF(type) (__alignof(type))
    2887 #endif
    2888 
    2889 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2890  #if defined(_WIN32)
    2891  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2892  #else
    2893  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2894  #endif
    2895 #endif
    2896 
    2897 #ifndef VMA_SYSTEM_FREE
    2898  #if defined(_WIN32)
    2899  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2900  #else
    2901  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2902  #endif
    2903 #endif
    2904 
    2905 #ifndef VMA_MIN
    2906  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2907 #endif
    2908 
    2909 #ifndef VMA_MAX
    2910  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2911 #endif
    2912 
    2913 #ifndef VMA_SWAP
    2914  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2915 #endif
    2916 
    2917 #ifndef VMA_SORT
    2918  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2919 #endif
    2920 
    2921 #ifndef VMA_DEBUG_LOG
    2922  #define VMA_DEBUG_LOG(format, ...)
    2923  /*
    2924  #define VMA_DEBUG_LOG(format, ...) do { \
    2925  printf(format, __VA_ARGS__); \
    2926  printf("\n"); \
    2927  } while(false)
    2928  */
    2929 #endif
    2930 
    2931 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2932 #if VMA_STATS_STRING_ENABLED
    2933  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2934  {
    2935  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2936  }
    2937  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2938  {
    2939  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2940  }
    2941  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2942  {
    2943  snprintf(outStr, strLen, "%p", ptr);
    2944  }
    2945 #endif
    2946 
    2947 #ifndef VMA_MUTEX
 // Thin wrapper over std::mutex exposing the Lock()/Unlock() interface expected
 // by VmaMutexLock. Can be replaced by defining the VMA_MUTEX macro.
 class VmaMutex
 {
 public:
     VmaMutex() { }
     ~VmaMutex() { }
     void Lock() { m_Mutex.lock(); }
     void Unlock() { m_Mutex.unlock(); }
 private:
     std::mutex m_Mutex;
 };
    2958  #define VMA_MUTEX VmaMutex
    2959 #endif
    2960 
    2961 /*
    2962 If providing your own implementation, you need to implement a subset of std::atomic:
    2963 
    2964 - Constructor(uint32_t desired)
    2965 - uint32_t load() const
    2966 - void store(uint32_t desired)
    2967 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2968 */
    2969 #ifndef VMA_ATOMIC_UINT32
    2970  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2971 #endif
    2972 
    2973 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2974 
    2978  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2979 #endif
    2980 
    2981 #ifndef VMA_DEBUG_ALIGNMENT
    2982 
    2986  #define VMA_DEBUG_ALIGNMENT (1)
    2987 #endif
    2988 
    2989 #ifndef VMA_DEBUG_MARGIN
    2990 
    2994  #define VMA_DEBUG_MARGIN (0)
    2995 #endif
    2996 
    2997 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2998 
    3002  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3003 #endif
    3004 
    3005 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3006 
    3011  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3012 #endif
    3013 
    3014 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3015 
    3019  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3020 #endif
    3021 
    3022 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3023 
    3027  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3028 #endif
    3029 
    3030 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3031  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3033 #endif
    3034 
    3035 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3036  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3038 #endif
    3039 
    3040 #ifndef VMA_CLASS_NO_COPY
    3041  #define VMA_CLASS_NO_COPY(className) \
    3042  private: \
    3043  className(const className&) = delete; \
    3044  className& operator=(const className&) = delete;
    3045 #endif
    3046 
    3047 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3048 
    3049 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3050 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3051 
    3052 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3053 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3054 
    3055 /*******************************************************************************
    3056 END OF CONFIGURATION
    3057 */
    3058 
    3059 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3060  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3061 
    3062 // Returns number of bits set to 1 in (v).
    3063 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3064 {
    3065  uint32_t c = v - ((v >> 1) & 0x55555555);
    3066  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3067  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3068  c = ((c >> 8) + c) & 0x00FF00FF;
    3069  c = ((c >> 16) + c) & 0x0000FFFF;
    3070  return c;
    3071 }
    3072 
    3073 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3074 // Use types like uint32_t, uint64_t as T.
    3075 template <typename T>
    3076 static inline T VmaAlignUp(T val, T align)
    3077 {
    3078  return (val + align - 1) / align * align;
    3079 }
    3080 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3081 // Use types like uint32_t, uint64_t as T.
    3082 template <typename T>
    3083 static inline T VmaAlignDown(T val, T align)
    3084 {
    3085  return val / align * align;
    3086 }
    3087 
    3088 // Division with mathematical rounding to nearest number.
    3089 template <typename T>
    3090 static inline T VmaRoundDiv(T x, T y)
    3091 {
    3092  return (x + (y / (T)2)) / y;
    3093 }
    3094 
    3095 /*
    3096 Returns true if given number is a power of two.
    3097 T must be unsigned integer number or signed integer but always nonnegative.
    3098 For 0 returns true.
    3099 */
    3100 template <typename T>
    3101 inline bool VmaIsPow2(T x)
    3102 {
    3103  return (x & (x-1)) == 0;
    3104 }
    3105 
    3106 // Returns smallest power of 2 greater or equal to v.
    3107 static inline uint32_t VmaNextPow2(uint32_t v)
    3108 {
    3109  v--;
    3110  v |= v >> 1;
    3111  v |= v >> 2;
    3112  v |= v >> 4;
    3113  v |= v >> 8;
    3114  v |= v >> 16;
    3115  v++;
    3116  return v;
    3117 }
    3118 static inline uint64_t VmaNextPow2(uint64_t v)
    3119 {
    3120  v--;
    3121  v |= v >> 1;
    3122  v |= v >> 2;
    3123  v |= v >> 4;
    3124  v |= v >> 8;
    3125  v |= v >> 16;
    3126  v |= v >> 32;
    3127  v++;
    3128  return v;
    3129 }
    3130 
    3131 // Returns largest power of 2 less or equal to v.
    3132 static inline uint32_t VmaPrevPow2(uint32_t v)
    3133 {
    3134  v |= v >> 1;
    3135  v |= v >> 2;
    3136  v |= v >> 4;
    3137  v |= v >> 8;
    3138  v |= v >> 16;
    3139  v = v ^ (v >> 1);
    3140  return v;
    3141 }
    3142 static inline uint64_t VmaPrevPow2(uint64_t v)
    3143 {
    3144  v |= v >> 1;
    3145  v |= v >> 2;
    3146  v |= v >> 4;
    3147  v |= v >> 8;
    3148  v |= v >> 16;
    3149  v |= v >> 32;
    3150  v = v ^ (v >> 1);
    3151  return v;
    3152 }
    3153 
    3154 static inline bool VmaStrIsEmpty(const char* pStr)
    3155 {
    3156  return pStr == VMA_NULL || *pStr == '\0';
    3157 }
    3158 
    3159 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3160 {
    3161  switch(algorithm)
    3162  {
    3164  return "Linear";
    3166  return "Buddy";
    3167  case 0:
    3168  return "Default";
    3169  default:
    3170  VMA_ASSERT(0);
    3171  return "";
    3172  }
    3173 }
    3174 
    3175 #ifndef VMA_SORT
    3176 
// Partition step of quick sort: uses the last element of [beg, end) as pivot,
// moves all elements for which cmp(elem, pivot) holds in front of the returned
// iterator, and swaps the pivot into its final sorted position.
// NOTE(review): variable names memTypeIndex/insertIndex look copied from a
// memory-type loop; they are generic iterators here.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // Pivot = last element of the range.
    Iterator insertIndex = beg; // Destination for the next element ordered before the pivot.
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
    3199 
// Recursive quick sort over [beg, end), used as the default VMA_SORT
// implementation. cmp(a, b) must return true if a is ordered before b.
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        // The pivot at `it` is already in its final position; sort both sides.
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}
    3210 
    3211 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3212 
    3213 #endif // #ifndef VMA_SORT
    3214 
/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be in less memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    // Round the last byte of A and the first byte of B down to page boundaries.
    // The mask trick requires pageSize to be a power of 2 (bufferImageGranularity is).
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
    3235 
// Kind of content stored in a suballocation. Used by
// VmaIsBufferImageGranularityConflict to decide whether two neighboring
// suballocations must be separated by bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,           // Unused region.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,        // Content unknown - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,  // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3246 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Order the pair so only one triangle of the symmetric relation is checked.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown content: assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Two optimal images never conflict with each other.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
    3287 
// Fills the VMA_DEBUG_MARGIN bytes at (pData + offset) with the
// corruption-detection magic value, to be checked later by VmaValidateMagicValue.
static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    // VMA_DEBUG_MARGIN is written as whole 32-bit words.
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}
    3297 
// Verifies that the VMA_DEBUG_MARGIN bytes at (pData + offset) still contain
// the magic value written by VmaWriteMagicValue. Returns false if any word
// differs, which indicates memory corruption (e.g. a buffer overrun).
static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
    return true;
}
    3311 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false, no locking is performed at all.
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex; // Null when locking is disabled.
};
    3337 
    3338 #if VMA_DEBUG_GLOBAL_MUTEX
    3339  static VMA_MUTEX gDebugGlobalMutex;
    3340  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3341 #else
    3342  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3343 #endif
    3344 
    3345 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3346 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3347 
    3348 /*
    3349 Performs binary search and returns iterator to first element that is greater or
    3350 equal to (key), according to comparison (cmp).
    3351 
    3352 Cmp should return true if first argument is less than second argument.
    3353 
    3354 Returned value is the found element, if present in the collection or place where
    3355 new element with value (key) should be inserted.
    3356 */
    3357 template <typename CmpLess, typename IterT, typename KeyT>
    3358 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3359 {
    3360  size_t down = 0, up = (end - beg);
    3361  while(down < up)
    3362  {
    3363  const size_t mid = (down + up) / 2;
    3364  if(cmp(*(beg+mid), key))
    3365  {
    3366  down = mid + 1;
    3367  }
    3368  else
    3369  {
    3370  up = mid;
    3371  }
    3372  }
    3373  return beg + down;
    3374 }
    3375 
// Memory allocation

// Allocates (size) bytes aligned to (alignment): through the user-provided
// allocation callback when one is set, otherwise through the system aligned
// allocator.
static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}
    3395 
// Counterpart of VmaMalloc: frees (ptr) through the user-provided free callback
// when one is set, otherwise through the system free.
static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}
    3408 
// Allocates raw, properly aligned storage for one object of type T.
// Does NOT run the constructor - see the vma_new macro for that.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    3414 
// Allocates raw, properly aligned storage for (count) objects of type T.
// Does NOT run constructors - see the vma_new_array macro for that.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    3420 
    3421 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3422 
    3423 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3424 
// Destroys the object and releases its memory through VmaFree.
// Counterpart of the vma_new macro.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3431 
// Destroys (count) objects in reverse order and releases the array memory.
// Safe to call with ptr == VMA_NULL. Counterpart of the vma_new_array macro.
template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        // Destroy in reverse construction order.
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
    3444 
// STL-compatible allocator.
// Minimal allocator that forwards all (de)allocations to the user-provided
// VkAllocationCallbacks, for use with STL containers and VmaVector.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal when they use the same callbacks pointer.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3472 
    3473 #if VMA_USE_STL_VECTOR
    3474 
    3475 #define VmaVector std::vector
    3476 
    3477 template<typename T, typename allocatorT>
    3478 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3479 {
    3480  vec.insert(vec.begin() + index, item);
    3481 }
    3482 
    3483 template<typename T, typename allocatorT>
    3484 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3485 {
    3486  vec.erase(vec.begin() + index);
    3487 }
    3488 
    3489 #else // #if VMA_USE_STL_VECTOR
    3490 
    3491 /* Class with interface compatible with subset of std::vector.
    3492 T must be POD because constructors and destructors are not called and memcpy is
    3493 used for these objects. */
    3494 template<typename T, typename AllocatorT>
    3495 class VmaVector
    3496 {
    3497 public:
    3498  typedef T value_type;
    3499 
    3500  VmaVector(const AllocatorT& allocator) :
    3501  m_Allocator(allocator),
    3502  m_pArray(VMA_NULL),
    3503  m_Count(0),
    3504  m_Capacity(0)
    3505  {
    3506  }
    3507 
    3508  VmaVector(size_t count, const AllocatorT& allocator) :
    3509  m_Allocator(allocator),
    3510  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3511  m_Count(count),
    3512  m_Capacity(count)
    3513  {
    3514  }
    3515 
    3516  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3517  m_Allocator(src.m_Allocator),
    3518  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3519  m_Count(src.m_Count),
    3520  m_Capacity(src.m_Count)
    3521  {
    3522  if(m_Count != 0)
    3523  {
    3524  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3525  }
    3526  }
    3527 
    3528  ~VmaVector()
    3529  {
    3530  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3531  }
    3532 
    3533  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3534  {
    3535  if(&rhs != this)
    3536  {
    3537  resize(rhs.m_Count);
    3538  if(m_Count != 0)
    3539  {
    3540  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3541  }
    3542  }
    3543  return *this;
    3544  }
    3545 
    3546  bool empty() const { return m_Count == 0; }
    3547  size_t size() const { return m_Count; }
    3548  T* data() { return m_pArray; }
    3549  const T* data() const { return m_pArray; }
    3550 
    3551  T& operator[](size_t index)
    3552  {
    3553  VMA_HEAVY_ASSERT(index < m_Count);
    3554  return m_pArray[index];
    3555  }
    3556  const T& operator[](size_t index) const
    3557  {
    3558  VMA_HEAVY_ASSERT(index < m_Count);
    3559  return m_pArray[index];
    3560  }
    3561 
    3562  T& front()
    3563  {
    3564  VMA_HEAVY_ASSERT(m_Count > 0);
    3565  return m_pArray[0];
    3566  }
    3567  const T& front() const
    3568  {
    3569  VMA_HEAVY_ASSERT(m_Count > 0);
    3570  return m_pArray[0];
    3571  }
    3572  T& back()
    3573  {
    3574  VMA_HEAVY_ASSERT(m_Count > 0);
    3575  return m_pArray[m_Count - 1];
    3576  }
    3577  const T& back() const
    3578  {
    3579  VMA_HEAVY_ASSERT(m_Count > 0);
    3580  return m_pArray[m_Count - 1];
    3581  }
    3582 
    3583  void reserve(size_t newCapacity, bool freeMemory = false)
    3584  {
    3585  newCapacity = VMA_MAX(newCapacity, m_Count);
    3586 
    3587  if((newCapacity < m_Capacity) && !freeMemory)
    3588  {
    3589  newCapacity = m_Capacity;
    3590  }
    3591 
    3592  if(newCapacity != m_Capacity)
    3593  {
    3594  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3595  if(m_Count != 0)
    3596  {
    3597  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3598  }
    3599  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3600  m_Capacity = newCapacity;
    3601  m_pArray = newArray;
    3602  }
    3603  }
    3604 
    3605  void resize(size_t newCount, bool freeMemory = false)
    3606  {
    3607  size_t newCapacity = m_Capacity;
    3608  if(newCount > m_Capacity)
    3609  {
    3610  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3611  }
    3612  else if(freeMemory)
    3613  {
    3614  newCapacity = newCount;
    3615  }
    3616 
    3617  if(newCapacity != m_Capacity)
    3618  {
    3619  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3620  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3621  if(elementsToCopy != 0)
    3622  {
    3623  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3624  }
    3625  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3626  m_Capacity = newCapacity;
    3627  m_pArray = newArray;
    3628  }
    3629 
    3630  m_Count = newCount;
    3631  }
    3632 
    // Removes all elements; releases the allocation only when freeMemory is true.
    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }
    3637 
    3638  void insert(size_t index, const T& src)
    3639  {
    3640  VMA_HEAVY_ASSERT(index <= m_Count);
    3641  const size_t oldCount = size();
    3642  resize(oldCount + 1);
    3643  if(index < oldCount)
    3644  {
    3645  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3646  }
    3647  m_pArray[index] = src;
    3648  }
    3649 
    3650  void remove(size_t index)
    3651  {
    3652  VMA_HEAVY_ASSERT(index < m_Count);
    3653  const size_t oldCount = size();
    3654  if(index < oldCount - 1)
    3655  {
    3656  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3657  }
    3658  resize(oldCount - 1);
    3659  }
    3660 
    3661  void push_back(const T& src)
    3662  {
    3663  const size_t newIndex = size();
    3664  resize(newIndex + 1);
    3665  m_pArray[newIndex] = src;
    3666  }
    3667 
    // Removes the last element. Vector must not be empty.
    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    // Inserts src at the beginning; O(n) since all elements are shifted.
    void push_front(const T& src)
    {
        insert(0, src);
    }

    // Removes the first element; O(n). Vector must not be empty.
    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }
    3684 
    // Raw-pointer iterators over the contiguous element array.
    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;  // provides m_pCallbacks used for all (de)allocations
    T* m_pArray;             // contiguous storage; VMA_NULL when capacity is 0
    size_t m_Count;          // number of live elements
    size_t m_Capacity;       // allocated element capacity, always >= m_Count
};
    3696 
// Free-function wrappers around VmaVector's insert/remove — presumably so
// the same call syntax works for the VMA_USE_STL_VECTOR variant as well
// (see the #endif below); confirm against that branch of the file.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3708 
    3709 #endif // #if VMA_USE_STL_VECTOR
    3710 
    3711 template<typename CmpLess, typename VectorT>
    3712 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3713 {
    3714  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3715  vector.data(),
    3716  vector.data() + vector.size(),
    3717  value,
    3718  CmpLess()) - vector.data();
    3719  VmaVectorInsert(vector, indexToInsert, value);
    3720  return indexToInsert;
    3721 }
    3722 
    3723 template<typename CmpLess, typename VectorT>
    3724 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3725 {
    3726  CmpLess comparator;
    3727  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3728  vector.begin(),
    3729  vector.end(),
    3730  value,
    3731  comparator);
    3732  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3733  {
    3734  size_t indexToRemove = it - vector.begin();
    3735  VmaVectorRemove(vector, indexToRemove);
    3736  return true;
    3737  }
    3738  return false;
    3739 }
    3740 
    3741 template<typename CmpLess, typename IterT, typename KeyT>
    3742 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3743 {
    3744  CmpLess comparator;
    3745  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3746  beg, end, value, comparator);
    3747  if(it == end ||
    3748  (!comparator(*it, value) && !comparator(value, *it)))
    3749  {
    3750  return it;
    3751  }
    3752  return end;
    3753 }
    3754 
    3756 // class VmaPoolAllocator
    3757 
    3758 /*
    3759 Allocator for objects of type T using a list of arrays (pools) to speed up
    3760 allocation. Number of elements that can be allocated is not bounded because
    3761 allocator can create multiple blocks.
    3762 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    // itemsPerBlock must be > 0 (asserted in the constructor).
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Releases every item block; outstanding Alloc() pointers become invalid.
    void Clear();
    // Returns a free slot, allocating a new block when all blocks are full.
    // The returned T is raw storage — no constructor is run here.
    T* Alloc();
    // Returns ptr (previously obtained from Alloc) to its block's free list.
    void Free(T* ptr);

private:
    // Each slot is either a live T or a link in the block's free list.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;            // array of m_ItemsPerBlock slots
        uint32_t FirstFreeIndex; // free-list head; UINT32_MAX means block full
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3793 
// Stores the callbacks and block size; no blocks are allocated up front.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3808 
// Releases every item block (newest first). Pointers previously returned
// by Alloc() are dangling after this call.
template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}
    3816 
// Returns a pointer to a free slot. Existing blocks are searched newest
// first; when none has a free slot, a new block is created. The returned
// T is raw storage — no constructor is run here.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            // Pop the slot off the block's singly-linked free list.
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
    3838 
// Returns ptr (obtained from Alloc) to the free list of the block that
// contains it. Asserts when ptr does not belong to any block. Linear in
// the number of blocks.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // memcpy instead of reinterpret_cast sidesteps strict-aliasing;
        // union members share an address, so the T* from &Item::Value
        // has the same value as the Item*.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            // Push the slot onto this block's free list.
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    3862 
// Allocates a block of m_ItemsPerBlock items, registers it in
// m_ItemBlocks, and chains all of its slots into the block's free list.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // Writing through the local copy is fine: pItems points at the same
    // array as the copy stored in m_ItemBlocks by push_back above.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
    3877 
    3879 // class VmaRawList, VmaList
    3880 
    3881 #if VMA_USE_STL_LIST
    3882 
    3883 #define VmaList std::list
    3884 
    3885 #else // #if VMA_USE_STL_LIST
    3886 
// One node of VmaRawList; allocated from a VmaPoolAllocator.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
// Nodes are pooled in a VmaPoolAllocator. The parameterless Push*/Insert*
// overloads return nodes whose Value is left uninitialized.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // pool of list nodes
    ItemType* m_pFront; // null when the list is empty
    ItemType* m_pBack;  // null when the list is empty
    size_t m_Count;
};
    3939 
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 list nodes per pool block
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}
    3956 
    3957 template<typename T>
    3958 void VmaRawList<T>::Clear()
    3959 {
    3960  if(IsEmpty() == false)
    3961  {
    3962  ItemType* pItem = m_pBack;
    3963  while(pItem != VMA_NULL)
    3964  {
    3965  ItemType* const pPrevItem = pItem->pPrev;
    3966  m_ItemAllocator.Free(pItem);
    3967  pItem = pPrevItem;
    3968  }
    3969  m_pFront = VMA_NULL;
    3970  m_pBack = VMA_NULL;
    3971  m_Count = 0;
    3972  }
    3973 }
    3974 
    3975 template<typename T>
    3976 VmaListItem<T>* VmaRawList<T>::PushBack()
    3977 {
    3978  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3979  pNewItem->pNext = VMA_NULL;
    3980  if(IsEmpty())
    3981  {
    3982  pNewItem->pPrev = VMA_NULL;
    3983  m_pFront = pNewItem;
    3984  m_pBack = pNewItem;
    3985  m_Count = 1;
    3986  }
    3987  else
    3988  {
    3989  pNewItem->pPrev = m_pBack;
    3990  m_pBack->pNext = pNewItem;
    3991  m_pBack = pNewItem;
    3992  ++m_Count;
    3993  }
    3994  return pNewItem;
    3995 }
    3996 
    3997 template<typename T>
    3998 VmaListItem<T>* VmaRawList<T>::PushFront()
    3999 {
    4000  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4001  pNewItem->pPrev = VMA_NULL;
    4002  if(IsEmpty())
    4003  {
    4004  pNewItem->pNext = VMA_NULL;
    4005  m_pFront = pNewItem;
    4006  m_pBack = pNewItem;
    4007  m_Count = 1;
    4008  }
    4009  else
    4010  {
    4011  pNewItem->pNext = m_pFront;
    4012  m_pFront->pPrev = pNewItem;
    4013  m_pFront = pNewItem;
    4014  ++m_Count;
    4015  }
    4016  return pNewItem;
    4017 }
    4018 
// Convenience overload: append a node and copy-assign value into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

// Convenience overload: prepend a node and copy-assign value into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    4034 
// Unlinks and frees the last node. List must not be empty.
// NOTE(review): when the last remaining node is popped, m_pFront is left
// pointing at the freed node; callers must check IsEmpty() before Front().
template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

// Unlinks and frees the first node. List must not be empty.
// NOTE(review): symmetric caveat — m_pBack may be left dangling when the
// list becomes empty.
template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}
    4064 
// Unlinks pItem from the list and returns it to the node allocator.
// pItem must belong to this list (checked only by heavy asserts).
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix the forward link of the predecessor, or the list head.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix the backward link of the successor, or the list tail.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4094 
// Inserts a new node (Value uninitialized) before pItem.
// pItem == null is interpreted as "insert at the end" (PushBack).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // Inserting before the first node: update the head pointer.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

// Inserts a new node (Value uninitialized) after pItem.
// pItem == null is interpreted as "insert at the beginning" (PushFront).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // Inserting after the last node: update the tail pointer.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
    4146 
// Convenience overload: insert before pItem and copy-assign value.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

// Convenience overload: insert after pItem and copy-assign value.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4162 
// std::list-like wrapper over VmaRawList exposing a subset of the std::list
// interface; used when VMA_USE_STL_LIST is not defined (see #if above).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Bidirectional iterator. A default-constructed or end() iterator has
    // m_pItem == VMA_NULL; decrementing end() yields the last element.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        // Only VmaList itself creates non-null iterators.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Const counterpart of iterator; implicitly constructible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    // AllocatorT is expected to expose m_pCallbacks (see VmaStlAllocator).
    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4347 
    4348 #endif // #if VMA_USE_STL_LIST
    4349 
    4351 // class VmaMap
    4352 
    4353 // Unused in this version.
    4354 #if 0
    4355 
    4356 #if VMA_USE_STL_UNORDERED_MAP
    4357 
    4358 #define VmaPair std::pair
    4359 
    4360 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4361  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4362 
    4363 #else // #if VMA_USE_STL_UNORDERED_MAP
    4364 
// Minimal std::pair replacement (this whole section is compiled out — #if 0).
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a vector of pairs kept sorted by key (see VmaPairFirstLess).
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4397 
    4398 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4399 
// Orders VmaPair objects (or a pair against a bare key) by pair.first;
// comparator for the sorted vector inside VmaMap.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

// Inserts pair at the position that keeps m_Vector sorted by key.
// Duplicate keys are not rejected here.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

// Binary-searches for key; returns an iterator to the matching pair,
// or end() when the key is absent.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

// Removes the pair that it points to; sorted order is preserved.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4447 
    4448 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4449 
    4450 #endif // #if 0
    4451 
    4453 
    4454 class VmaDeviceMemoryBlock;
    4455 
    4456 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4457 
    4458 struct VmaAllocation_T
    4459 {
    4460  VMA_CLASS_NO_COPY(VmaAllocation_T)
    4461 private:
    4462  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    4463 
    4464  enum FLAGS
    4465  {
    4466  FLAG_USER_DATA_STRING = 0x01,
    4467  };
    4468 
    4469 public:
    4470  enum ALLOCATION_TYPE
    4471  {
    4472  ALLOCATION_TYPE_NONE,
    4473  ALLOCATION_TYPE_BLOCK,
    4474  ALLOCATION_TYPE_DEDICATED,
    4475  };
    4476 
    // Constructs an allocation in ALLOCATION_TYPE_NONE state; one of the
    // Init* methods below must be called before the object is used.
    // userDataString sets FLAG_USER_DATA_STRING — presumably consumed by
    // SetUserData (defined out of line); confirm there.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    // Asserts that the allocation was unmapped (persistent-map bit excluded)
    // and that the owned user-data string was already released.
    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }
    4500 
    // Initializes this object as a suballocation from a device memory block.
    // Must currently be in ALLOCATION_TYPE_NONE state (freshly constructed).
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        // Persistent-map flag lives in the top bit of m_MapCount.
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object directly in the "lost" state: a block
    // allocation with no backing block. Requires m_LastUseFrameIndex to
    // already equal VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }
    4534 
    // Re-points a block allocation at a different block/offset — presumably
    // used by defragmentation; confirm at call sites. Defined out of line.
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        // Persistent-map flag mirrors whether the memory came pre-mapped.
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }
    4561 
    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for block allocations (asserted).
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        // compare_exchange_weak may fail spuriously; callers are expected to loop.
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    4600 
    // Fills outInfo with statistics describing this single dedicated
    // allocation: one block, one allocation, no unused ranges.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX; // "no unused range" sentinel
        outInfo.unusedRangeSizeMax = 0;
    }
    4613 
    4614  void BlockAllocMap();
    4615  void BlockAllocUnmap();
    4616  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    4617  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    4618 
    4619 #if VMA_STATS_STRING_ENABLED
    4620  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    4621  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    4622 
    4623  void InitBufferImageUsage(uint32_t bufferImageUsage)
    4624  {
    4625  VMA_ASSERT(m_BufferImageUsage == 0);
    4626  m_BufferImageUsage = bufferImageUsage;
    4627  }
    4628 
    4629  void PrintParameters(class VmaJsonWriter& json) const;
    4630 #endif
    4631 
    4632 private:
    4633  VkDeviceSize m_Alignment;
    4634  VkDeviceSize m_Size;
    4635  void* m_pUserData;
    4636  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    4637  uint8_t m_Type; // ALLOCATION_TYPE
    4638  uint8_t m_SuballocationType; // VmaSuballocationType
    4639  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    4640  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    4641  uint8_t m_MapCount;
    4642  uint8_t m_Flags; // enum FLAGS
    4643 
    4644  // Allocation out of VmaDeviceMemoryBlock.
    4645  struct BlockAllocation
    4646  {
    4647  VmaPool m_hPool; // Null if belongs to general memory.
    4648  VmaDeviceMemoryBlock* m_Block;
    4649  VkDeviceSize m_Offset;
    4650  bool m_CanBecomeLost;
    4651  };
    4652 
    4653  // Allocation for an object that has its own private VkDeviceMemory.
    4654  struct DedicatedAllocation
    4655  {
    4656  uint32_t m_MemoryTypeIndex;
    4657  VkDeviceMemory m_hMemory;
    4658  void* m_pMappedData; // Not null means memory is mapped.
    4659  };
    4660 
    4661  union
    4662  {
    4663  // Allocation out of VmaDeviceMemoryBlock.
    4664  BlockAllocation m_BlockAllocation;
    4665  // Allocation for an object that has its own private VkDeviceMemory.
    4666  DedicatedAllocation m_DedicatedAllocation;
    4667  };
    4668 
    4669 #if VMA_STATS_STRING_ENABLED
    4670  uint32_t m_CreationFrameIndex;
    4671  uint32_t m_BufferImageUsage; // 0 if unknown.
    4672 #endif
    4673 
    4674  void FreeUserDataString(VmaAllocator hAllocator);
    4675 };
    4676 
    4677 /*
    4678 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    4679 allocated memory block or free.
    4680 */
    4681 struct VmaSuballocation
    4682 {
    4683  VkDeviceSize offset; // Offset of the region from the start of the block.
    4684  VkDeviceSize size; // Size of the region in bytes.
    4685  VmaAllocation hAllocation; // NOTE(review): presumably null when the region is free - confirm against usage.
    4686  VmaSuballocationType type; // FREE, or the kind of resource occupying the region.
    4687 };
    4688 
    4689 // Comparator for offsets.
         // Strict weak ordering: sorts suballocations by offset, ascending.
    4690 struct VmaSuballocationOffsetLess
    4691 {
    4692  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4693  {
    4694  return lhs.offset < rhs.offset;
    4695  }
    4696 };
         // Comparator for offsets: sorts suballocations by offset, descending.
    4697 struct VmaSuballocationOffsetGreater
    4698 {
    4699  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4700  {
    4701  return lhs.offset > rhs.offset;
    4702  }
    4703 };
    4704 
         // List of suballocations in a single block, both free and allocated.
    4705 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    4706 
    4707 // Cost of one additional allocation lost, as equivalent in bytes.
         // 1 MiB; used by VmaAllocationRequest::CalcCost().
    4708 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4709 
    4710 /*
    4711 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4712 
    4713 If canMakeOtherLost was false:
    4714 - item points to a FREE suballocation.
    4715 - itemsToMakeLostCount is 0.
    4716 
    4717 If canMakeOtherLost was true:
    4718 - item points to first of sequence of suballocations, which are either FREE,
    4719  or point to VmaAllocations that can become lost.
    4720 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4721  the requested allocation to succeed.
    4722 */
    4723 struct VmaAllocationRequest
    4724 {
    4725  VkDeviceSize offset;
    4726  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    4727  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    4728  VmaSuballocationList::iterator item;
    4729  size_t itemsToMakeLostCount;
    4730  void* customData;
    4731 
         // Heuristic cost of fulfilling this request: bytes of existing allocations
         // that would be sacrificed, plus a fixed per-allocation penalty
         // (VMA_LOST_ALLOCATION_COST). Lower is better.
    4732  VkDeviceSize CalcCost() const
    4733  {
    4734  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    4735  }
    4736 };
    4737 
    4738 /*
    4739 Data structure used for bookkeeping of allocations and unused ranges of memory
    4740 in a single VkDeviceMemory block.
    4741 */
         // Abstract interface; concrete strategies are the derived classes
         // VmaBlockMetadata_Generic, _Linear and _Buddy below.
    4742 class VmaBlockMetadata
    4743 {
    4744 public:
    4745  VmaBlockMetadata(VmaAllocator hAllocator);
    4746  virtual ~VmaBlockMetadata() { }
         // Base version only records the block size; derived classes build their
         // initial data structures on top of this.
    4747  virtual void Init(VkDeviceSize size) { m_Size = size; }
    4748 
    4749  // Validates all data structures inside this object. If not valid, returns false.
    4750  virtual bool Validate() const = 0;
    4751  VkDeviceSize GetSize() const { return m_Size; }
    4752  virtual size_t GetAllocationCount() const = 0;
    4753  virtual VkDeviceSize GetSumFreeSize() const = 0;
    4754  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    4755  // Returns true if this block is empty - contains only single free suballocation.
    4756  virtual bool IsEmpty() const = 0;
    4757 
    4758  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    4759  // Shouldn't modify blockCount.
    4760  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
    4761 
    4762 #if VMA_STATS_STRING_ENABLED
    4763  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
    4764 #endif
    4765 
    4766  // Tries to find a place for suballocation with given parameters inside this block.
    4767  // If succeeded, fills pAllocationRequest and returns true.
    4768  // If failed, returns false.
    4769  virtual bool CreateAllocationRequest(
    4770  uint32_t currentFrameIndex,
    4771  uint32_t frameInUseCount,
    4772  VkDeviceSize bufferImageGranularity,
    4773  VkDeviceSize allocSize,
    4774  VkDeviceSize allocAlignment,
    4775  bool upperAddress,
    4776  VmaSuballocationType allocType,
    4777  bool canMakeOtherLost,
    4778  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
    4779  VmaAllocationRequest* pAllocationRequest) = 0;
    4780 
    4781  virtual bool MakeRequestedAllocationsLost(
    4782  uint32_t currentFrameIndex,
    4783  uint32_t frameInUseCount,
    4784  VmaAllocationRequest* pAllocationRequest) = 0;
    4785 
    4786  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
    4787 
    4788  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    4789 
    4790  // Makes actual allocation based on request. Request must already be checked and valid.
    4791  virtual void Alloc(
    4792  const VmaAllocationRequest& request,
    4793  VmaSuballocationType type,
    4794  VkDeviceSize allocSize,
    4795  bool upperAddress,
    4796  VmaAllocation hAllocation) = 0;
    4797 
    4798  // Frees suballocation assigned to given memory region.
    4799  virtual void Free(const VmaAllocation allocation) = 0;
    4800  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
    4801 
    4802  // Tries to resize (grow or shrink) space for given allocation, in place.
         // Default implementation: in-place resize is not supported.
    4803  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
    4804 
    4805 protected:
    4806  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    4807 
    4808 #if VMA_STATS_STRING_ENABLED
         // Shared helpers for derived classes' PrintDetailedMap() implementations.
    4809  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
    4810  VkDeviceSize unusedBytes,
    4811  size_t allocationCount,
    4812  size_t unusedRangeCount) const;
    4813  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    4814  VkDeviceSize offset,
    4815  VmaAllocation hAllocation) const;
    4816  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    4817  VkDeviceSize offset,
    4818  VkDeviceSize size) const;
    4819  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
    4820 #endif
    4821 
    4822 private:
    4823  VkDeviceSize m_Size;
    4824  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4825 };
    4826 
         // Helper for Validate() implementations: if cond is false, asserts in debug
         // builds and makes the enclosing function return false.
         // The do/while(false) wrapper makes the macro behave as a single statement.
    4827 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
    4828  VMA_ASSERT(0 && "Validation failed: " #cond); \
    4829  return false; \
    4830  } } while(false)
    4831 
         // Default metadata implementation: a linked list of suballocations ordered
         // by offset, plus a size-sorted index of free ranges for fast search.
    4832 class VmaBlockMetadata_Generic : public VmaBlockMetadata
    4833 {
    4834  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
    4835 public:
    4836  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    4837  virtual ~VmaBlockMetadata_Generic();
    4838  virtual void Init(VkDeviceSize size);
    4839 
    4840  virtual bool Validate() const;
         // Total items minus free items = live allocations.
    4841  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    4842  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    4843  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    4844  virtual bool IsEmpty() const;
    4845 
    4846  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    4847  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    4848 
    4849 #if VMA_STATS_STRING_ENABLED
    4850  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    4851 #endif
    4852 
    4853  virtual bool CreateAllocationRequest(
    4854  uint32_t currentFrameIndex,
    4855  uint32_t frameInUseCount,
    4856  VkDeviceSize bufferImageGranularity,
    4857  VkDeviceSize allocSize,
    4858  VkDeviceSize allocAlignment,
    4859  bool upperAddress,
    4860  VmaSuballocationType allocType,
    4861  bool canMakeOtherLost,
    4862  uint32_t strategy,
    4863  VmaAllocationRequest* pAllocationRequest);
    4864 
    4865  virtual bool MakeRequestedAllocationsLost(
    4866  uint32_t currentFrameIndex,
    4867  uint32_t frameInUseCount,
    4868  VmaAllocationRequest* pAllocationRequest);
    4869 
    4870  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    4871 
    4872  virtual VkResult CheckCorruption(const void* pBlockData);
    4873 
    4874  virtual void Alloc(
    4875  const VmaAllocationRequest& request,
    4876  VmaSuballocationType type,
    4877  VkDeviceSize allocSize,
    4878  bool upperAddress,
    4879  VmaAllocation hAllocation);
    4880 
    4881  virtual void Free(const VmaAllocation allocation);
    4882  virtual void FreeAtOffset(VkDeviceSize offset);
    4883 
         // The only metadata type that overrides ResizeAllocation (base returns false).
    4884  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
    4885 
    4886 private:
    4887  uint32_t m_FreeCount;
    4888  VkDeviceSize m_SumFreeSize;
    4889  VmaSuballocationList m_Suballocations;
    4890  // Suballocations that are free and have size greater than certain threshold.
    4891  // Sorted by size, ascending.
    4892  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    4893 
    4894  bool ValidateFreeSuballocationList() const;
    4895 
    4896  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    4897  // If yes, fills pOffset and returns true. If no, returns false.
    4898  bool CheckAllocation(
    4899  uint32_t currentFrameIndex,
    4900  uint32_t frameInUseCount,
    4901  VkDeviceSize bufferImageGranularity,
    4902  VkDeviceSize allocSize,
    4903  VkDeviceSize allocAlignment,
    4904  VmaSuballocationType allocType,
    4905  VmaSuballocationList::const_iterator suballocItem,
    4906  bool canMakeOtherLost,
    4907  VkDeviceSize* pOffset,
    4908  size_t* itemsToMakeLostCount,
    4909  VkDeviceSize* pSumFreeSize,
    4910  VkDeviceSize* pSumItemSize) const;
    4911  // Given free suballocation, it merges it with following one, which must also be free.
    4912  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    4913  // Releases given suballocation, making it free.
    4914  // Merges it with adjacent free suballocations if applicable.
    4915  // Returns iterator to new free suballocation at this place.
    4916  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    4917  // Given free suballocation, it inserts it into sorted list of
    4918  // m_FreeSuballocationsBySize if it's suitable.
    4919  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    4920  // Given free suballocation, it removes it from sorted list of
    4921  // m_FreeSuballocationsBySize if it's suitable.
    4922  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    4923 };
    4924 
    4925 /*
    4926 Allocations and their references in internal data structure look like this:
    4927 
    4928 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4929 
    4930  0 +-------+
    4931  | |
    4932  | |
    4933  | |
    4934  +-------+
    4935  | Alloc | 1st[m_1stNullItemsBeginCount]
    4936  +-------+
    4937  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4938  +-------+
    4939  | ... |
    4940  +-------+
    4941  | Alloc | 1st[1st.size() - 1]
    4942  +-------+
    4943  | |
    4944  | |
    4945  | |
    4946 GetSize() +-------+
    4947 
    4948 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4949 
    4950  0 +-------+
    4951  | Alloc | 2nd[0]
    4952  +-------+
    4953  | Alloc | 2nd[1]
    4954  +-------+
    4955  | ... |
    4956  +-------+
    4957  | Alloc | 2nd[2nd.size() - 1]
    4958  +-------+
    4959  | |
    4960  | |
    4961  | |
    4962  +-------+
    4963  | Alloc | 1st[m_1stNullItemsBeginCount]
    4964  +-------+
    4965  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4966  +-------+
    4967  | ... |
    4968  +-------+
    4969  | Alloc | 1st[1st.size() - 1]
    4970  +-------+
    4971  | |
    4972 GetSize() +-------+
    4973 
    4974 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4975 
    4976  0 +-------+
    4977  | |
    4978  | |
    4979  | |
    4980  +-------+
    4981  | Alloc | 1st[m_1stNullItemsBeginCount]
    4982  +-------+
    4983  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4984  +-------+
    4985  | ... |
    4986  +-------+
    4987  | Alloc | 1st[1st.size() - 1]
    4988  +-------+
    4989  | |
    4990  | |
    4991  | |
    4992  +-------+
    4993  | Alloc | 2nd[2nd.size() - 1]
    4994  +-------+
    4995  | ... |
    4996  +-------+
    4997  | Alloc | 2nd[1]
    4998  +-------+
    4999  | Alloc | 2nd[0]
    5000 GetSize() +-------+
    5001 
    5002 */
         // Linear metadata implementation: two suballocation vectors used in
         // ping-pong fashion, supporting ring-buffer and double-stack patterns
         // (see the diagrams above).
    5003 class VmaBlockMetadata_Linear : public VmaBlockMetadata
    5004 {
    5005  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
    5006 public:
    5007  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    5008  virtual ~VmaBlockMetadata_Linear();
    5009  virtual void Init(VkDeviceSize size);
    5010 
    5011  virtual bool Validate() const;
    5012  virtual size_t GetAllocationCount() const;
    5013  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5014  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5015  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
    5016 
    5017  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5018  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5019 
    5020 #if VMA_STATS_STRING_ENABLED
    5021  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5022 #endif
    5023 
    5024  virtual bool CreateAllocationRequest(
    5025  uint32_t currentFrameIndex,
    5026  uint32_t frameInUseCount,
    5027  VkDeviceSize bufferImageGranularity,
    5028  VkDeviceSize allocSize,
    5029  VkDeviceSize allocAlignment,
    5030  bool upperAddress,
    5031  VmaSuballocationType allocType,
    5032  bool canMakeOtherLost,
    5033  uint32_t strategy,
    5034  VmaAllocationRequest* pAllocationRequest);
    5035 
    5036  virtual bool MakeRequestedAllocationsLost(
    5037  uint32_t currentFrameIndex,
    5038  uint32_t frameInUseCount,
    5039  VmaAllocationRequest* pAllocationRequest);
    5040 
    5041  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5042 
    5043  virtual VkResult CheckCorruption(const void* pBlockData);
    5044 
    5045  virtual void Alloc(
    5046  const VmaAllocationRequest& request,
    5047  VmaSuballocationType type,
    5048  VkDeviceSize allocSize,
    5049  bool upperAddress,
    5050  VmaAllocation hAllocation);
    5051 
    5052  virtual void Free(const VmaAllocation allocation);
    5053  virtual void FreeAtOffset(VkDeviceSize offset);
    5054 
         // NOTE: ResizeAllocation is not overridden here - the inherited base
         // implementation always returns false.
    5055 private:
    5056  /*
    5057  There are two suballocation vectors, used in ping-pong way.
    5058  The one with index m_1stVectorIndex is called 1st.
    5059  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    5060  2nd can be non-empty only when 1st is not empty.
    5061  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    5062  */
    5063  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
    5064 
    5065  enum SECOND_VECTOR_MODE
    5066  {
    5067  SECOND_VECTOR_EMPTY,
    5068  /*
    5069  Suballocations in 2nd vector are created later than the ones in 1st, but they
    5070  all have smaller offset.
    5071  */
    5072  SECOND_VECTOR_RING_BUFFER,
    5073  /*
    5074  Suballocations in 2nd vector are upper side of double stack.
    5075  They all have offsets higher than those in 1st vector.
    5076  Top of this stack means smaller offsets, but higher indices in this vector.
    5077  */
    5078  SECOND_VECTOR_DOUBLE_STACK,
    5079  };
    5080 
    5081  VkDeviceSize m_SumFreeSize;
    5082  SuballocationVectorType m_Suballocations0, m_Suballocations1;
    5083  uint32_t m_1stVectorIndex;
    5084  SECOND_VECTOR_MODE m_2ndVectorMode;
    5085 
         // Accessors resolve the ping-pong indirection described above.
    5086  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5087  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5088  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5089  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5090 
    5091  // Number of items in 1st vector with hAllocation = null at the beginning.
    5092  size_t m_1stNullItemsBeginCount;
    5093  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    5094  size_t m_1stNullItemsMiddleCount;
    5095  // Number of items in 2nd vector with hAllocation = null.
    5096  size_t m_2ndNullItemsCount;
    5097 
    5098  bool ShouldCompact1st() const;
    5099  void CleanupAfterFree();
    5100 };
    5101 
    5102 /*
    5103 - GetSize() is the original size of allocated memory block.
    5104 - m_UsableSize is this size aligned down to a power of two.
    5105  All allocations and calculations happen relative to m_UsableSize.
    5106 - GetUnusableSize() is the difference between them.
    5107  It is repoted as separate, unused range, not available for allocations.
    5108 
    5109 Node at level 0 has size = m_UsableSize.
    5110 Each next level contains nodes with size 2 times smaller than current level.
    5111 m_LevelCount is the maximum number of levels to use in the current object.
    5112 */
    5113 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
    5114 {
    5115  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
    5116 public:
    5117  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    5118  virtual ~VmaBlockMetadata_Buddy();
    5119  virtual void Init(VkDeviceSize size);
    5120 
    5121  virtual bool Validate() const;
    5122  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
         // The unusable tail (block size beyond the power-of-two m_UsableSize) is
         // counted as free space here; see GetUnusableSize().
    5123  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    5124  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5125  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
    5126 
    5127  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5128  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5129 
    5130 #if VMA_STATS_STRING_ENABLED
    5131  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5132 #endif
    5133 
    5134  virtual bool CreateAllocationRequest(
    5135  uint32_t currentFrameIndex,
    5136  uint32_t frameInUseCount,
    5137  VkDeviceSize bufferImageGranularity,
    5138  VkDeviceSize allocSize,
    5139  VkDeviceSize allocAlignment,
    5140  bool upperAddress,
    5141  VmaSuballocationType allocType,
    5142  bool canMakeOtherLost,
    5143  uint32_t strategy,
    5144  VmaAllocationRequest* pAllocationRequest);
    5145 
    5146  virtual bool MakeRequestedAllocationsLost(
    5147  uint32_t currentFrameIndex,
    5148  uint32_t frameInUseCount,
    5149  VmaAllocationRequest* pAllocationRequest);
    5150 
    5151  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5152 
         // Corruption detection is not implemented for the buddy algorithm.
    5153  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
    5154 
    5155  virtual void Alloc(
    5156  const VmaAllocationRequest& request,
    5157  VmaSuballocationType type,
    5158  VkDeviceSize allocSize,
    5159  bool upperAddress,
    5160  VmaAllocation hAllocation);
    5161 
    5162  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    5163  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
    5164 
    5165 private:
         // NOTE(review): names suggest nodes are never split below MIN_NODE_SIZE and
         // the tree depth is capped at MAX_LEVELS - confirm in Init()/AllocSizeToLevel().
    5166  static const VkDeviceSize MIN_NODE_SIZE = 32;
    5167  static const size_t MAX_LEVELS = 30;
    5168 
         // Counters accumulated while walking the tree in ValidateNode(), used by
         // Validate() to cross-check the bookkeeping members below.
    5169  struct ValidationContext
    5170  {
    5171  size_t calculatedAllocationCount;
    5172  size_t calculatedFreeCount;
    5173  VkDeviceSize calculatedSumFreeSize;
    5174 
    5175  ValidationContext() :
    5176  calculatedAllocationCount(0),
    5177  calculatedFreeCount(0),
    5178  calculatedSumFreeSize(0) { }
    5179  };
    5180 
         // Node of the binary buddy tree. The active union member depends on type.
    5181  struct Node
    5182  {
    5183  VkDeviceSize offset;
    5184  enum TYPE
    5185  {
    5186  TYPE_FREE,
    5187  TYPE_ALLOCATION,
    5188  TYPE_SPLIT,
    5189  TYPE_COUNT
    5190  } type;
    5191  Node* parent;
    5192  Node* buddy;
    5193 
    5194  union
    5195  {
    5196  struct
    5197  {
    5198  Node* prev;
    5199  Node* next;
    5200  } free;
    5201  struct
    5202  {
    5203  VmaAllocation alloc;
    5204  } allocation;
    5205  struct
    5206  {
    5207  Node* leftChild;
    5208  } split;
    5209  };
    5210  };
    5211 
    5212  // Size of the memory block aligned down to a power of two.
    5213  VkDeviceSize m_UsableSize;
    5214  uint32_t m_LevelCount;
    5215 
    5216  Node* m_Root;
         // Per-level intrusive doubly-linked list of free nodes (via Node::free).
    5217  struct {
    5218  Node* front;
    5219  Node* back;
    5220  } m_FreeList[MAX_LEVELS];
    5221  // Number of nodes in the tree with type == TYPE_ALLOCATION.
    5222  size_t m_AllocationCount;
    5223  // Number of nodes in the tree with type == TYPE_FREE.
    5224  size_t m_FreeCount;
    5225  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    5226  VkDeviceSize m_SumFreeSize;
    5227 
    5228  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    5229  void DeleteNode(Node* node);
    5230  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    5231  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
         // Each level halves the node size: level 0 spans the whole m_UsableSize.
    5232  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    5233  // Alloc passed just for validation. Can be null.
    5234  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    5235  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    5236  // Adds node to the front of FreeList at given level.
    5237  // node->type must be FREE.
    5238  // node->free.prev, next can be undefined.
    5239  void AddToFreeListFront(uint32_t level, Node* node);
    5240  // Removes node from FreeList at given level.
    5241  // node->type must be FREE.
    5242  // node->free.prev, next stay untouched.
    5243  void RemoveFromFreeList(uint32_t level, Node* node);
    5244 
    5245 #if VMA_STATS_STRING_ENABLED
    5246  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
    5247 #endif
    5248 };
    5249 
    5250 /*
    5251 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5252 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5253 
    5254 Thread-safety: This class must be externally synchronized.
    5255 */
    5256 class VmaDeviceMemoryBlock
    5257 {
    5258  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
    5259 public:
         // Metadata describing suballocations within this block
         // (concrete algorithm selected via the `algorithm` parameter of Init()).
    5260  VmaBlockMetadata* m_pMetadata;
    5261 
    5262  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    5263 
         // Destructor only asserts correct teardown: Unmap() must have balanced all
         // Map() calls and Destroy() must have released m_hMemory beforehand.
    5264  ~VmaDeviceMemoryBlock()
    5265  {
    5266  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5267  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5268  }
    5269 
    5270  // Always call after construction.
    5271  void Init(
    5272  VmaAllocator hAllocator,
    5273  uint32_t newMemoryTypeIndex,
    5274  VkDeviceMemory newMemory,
    5275  VkDeviceSize newSize,
    5276  uint32_t id,
    5277  uint32_t algorithm);
    5278  // Always call before destruction.
    5279  void Destroy(VmaAllocator allocator);
    5280 
    5281  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    5282  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5283  uint32_t GetId() const { return m_Id; }
    5284  void* GetMappedData() const { return m_pMappedData; }
    5285 
    5286  // Validates all data structures inside this object. If not valid, returns false.
    5287  bool Validate() const;
    5288 
    5289  VkResult CheckCorruption(VmaAllocator hAllocator);
    5290 
    5291  // ppData can be null.
         // NOTE(review): `count` appears to be the number of map references to
         // add/remove against m_MapCount - confirm in the implementation.
    5292  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    5293  void Unmap(VmaAllocator hAllocator, uint32_t count);
    5294 
         // Corruption-detection helpers: write/verify guard values around an allocation.
    5295  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5296  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5297 
    5298  VkResult BindBufferMemory(
    5299  const VmaAllocator hAllocator,
    5300  const VmaAllocation hAllocation,
    5301  VkBuffer hBuffer);
    5302  VkResult BindImageMemory(
    5303  const VmaAllocator hAllocator,
    5304  const VmaAllocation hAllocation,
    5305  VkImage hImage);
    5306 
    5307 private:
    5308  uint32_t m_MemoryTypeIndex;
    5309  uint32_t m_Id;
    5310  VkDeviceMemory m_hMemory;
    5311 
    5312  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    5313  // Also protects m_MapCount, m_pMappedData.
    5314  VMA_MUTEX m_Mutex;
    5315  uint32_t m_MapCount;
    5316  void* m_pMappedData;
    5317 };
    5318 
         // Comparator ordering arbitrary pointers, e.g. for use as a map/set key.
    5319 struct VmaPointerLess
    5320 {
    5321  bool operator()(const void* lhs, const void* rhs) const
    5322  {
         // NOTE(review): raw '<' on pointers into different objects is unspecified
         // in standard C++; std::less<const void*> guarantees a strict total order.
         // Works on common flat-address-space platforms - consider switching.
    5323  return lhs < rhs;
    5324  }
    5325 };
    5326 
    5327 class VmaDefragmentator;
    5328 
    5329 /*
    5330 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5331 Vulkan memory type.
    5332 
    5333 Synchronized internally with a mutex.
    5334 */
    5335 struct VmaBlockVector
    5336 {
    5337  VMA_CLASS_NO_COPY(VmaBlockVector)
    5338 public:
    5339  VmaBlockVector(
    5340  VmaAllocator hAllocator,
    5341  uint32_t memoryTypeIndex,
    5342  VkDeviceSize preferredBlockSize,
    5343  size_t minBlockCount,
    5344  size_t maxBlockCount,
    5345  VkDeviceSize bufferImageGranularity,
    5346  uint32_t frameInUseCount,
    5347  bool isCustomPool,
    5348  bool explicitBlockSize,
    5349  uint32_t algorithm);
    5350  ~VmaBlockVector();
    5351 
         // Presumably pre-creates m_MinBlockCount blocks - confirm in implementation.
    5352  VkResult CreateMinBlocks();
    5353 
    5354  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5355  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    5356  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    5357  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    5358  uint32_t GetAlgorithm() const { return m_Algorithm; }
    5359 
    5360  void GetPoolStats(VmaPoolStats* pStats);
    5361 
    5362  bool IsEmpty() const { return m_Blocks.empty(); }
    5363  bool IsCorruptionDetectionEnabled() const;
    5364 
    5365  VkResult Allocate(
    5366  VmaPool hCurrentPool,
    5367  uint32_t currentFrameIndex,
    5368  VkDeviceSize size,
    5369  VkDeviceSize alignment,
    5370  const VmaAllocationCreateInfo& createInfo,
    5371  VmaSuballocationType suballocType,
    5372  VmaAllocation* pAllocation);
    5373 
    5374  void Free(
    5375  VmaAllocation hAllocation);
    5376 
    5377  // Adds statistics of this BlockVector to pStats.
    5378  void AddStats(VmaStats* pStats);
    5379 
    5380 #if VMA_STATS_STRING_ENABLED
    5381  void PrintDetailedMap(class VmaJsonWriter& json);
    5382 #endif
    5383 
    5384  void MakePoolAllocationsLost(
    5385  uint32_t currentFrameIndex,
    5386  size_t* pLostAllocationCount);
    5387  VkResult CheckCorruption();
    5388 
         // Returns the defragmentator for this vector; the name implies it is
         // created lazily on first call - see also DestroyDefragmentator().
    5389  VmaDefragmentator* EnsureDefragmentator(
    5390  VmaAllocator hAllocator,
    5391  uint32_t currentFrameIndex);
    5392 
         // maxBytesToMove / maxAllocationsToMove are in-out budgets for the pass.
    5393  VkResult Defragment(
    5394  VmaDefragmentationStats* pDefragmentationStats,
    5395  VkDeviceSize& maxBytesToMove,
    5396  uint32_t& maxAllocationsToMove);
    5397 
    5398  void DestroyDefragmentator();
    5399 
    5400 private:
    5401  friend class VmaDefragmentator;
    5402 
    5403  const VmaAllocator m_hAllocator;
    5404  const uint32_t m_MemoryTypeIndex;
    5405  const VkDeviceSize m_PreferredBlockSize;
    5406  const size_t m_MinBlockCount;
    5407  const size_t m_MaxBlockCount;
    5408  const VkDeviceSize m_BufferImageGranularity;
    5409  const uint32_t m_FrameInUseCount;
    5410  const bool m_IsCustomPool;
    5411  const bool m_ExplicitBlockSize;
    5412  const uint32_t m_Algorithm;
    5413  bool m_HasEmptyBlock;
    5414  VMA_MUTEX m_Mutex;
    5415  // Incrementally sorted by sumFreeSize, ascending.
    5416  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    5417  /* There can be at most one allocation that is completely empty - a
    5418  hysteresis to avoid pessimistic case of alternating creation and destruction
    5419  of a VkDeviceMemory. */
         // NOTE(review): the comment above appears to describe m_HasEmptyBlock,
         // not m_pDefragmentator - consider moving it next to that member.
    5420  VmaDefragmentator* m_pDefragmentator;
    5421  uint32_t m_NextBlockId;
    5422 
    5423  VkDeviceSize CalcMaxBlockSize() const;
    5424 
    5425  // Finds and removes given block from vector.
    5426  void Remove(VmaDeviceMemoryBlock* pBlock);
    5427 
    5428  // Performs single step in sorting m_Blocks. They may not be fully sorted
    5429  // after this call.
    5430  void IncrementallySortBlocks();
    5431 
    5432  // To be used only without CAN_MAKE_OTHER_LOST flag.
    5433  VkResult AllocateFromBlock(
    5434  VmaDeviceMemoryBlock* pBlock,
    5435  VmaPool hCurrentPool,
    5436  uint32_t currentFrameIndex,
    5437  VkDeviceSize size,
    5438  VkDeviceSize alignment,
    5439  VmaAllocationCreateFlags allocFlags,
    5440  void* pUserData,
    5441  VmaSuballocationType suballocType,
    5442  uint32_t strategy,
    5443  VmaAllocation* pAllocation);
    5444 
    5445  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    5446 };
    5447 
// Represents a single custom memory pool created with vmaCreatePool().
// A thin wrapper around one VmaBlockVector plus a numeric identifier
// (used e.g. by the recording facility).
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    // The pool's backing storage: all its blocks and allocations live here.
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // The id may be assigned only once; 0 means "not yet assigned".
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5470 
// Performs defragmentation within a single VmaBlockVector: moves registered
// allocations between blocks to reduce fragmentation. Register candidates
// with AddAllocation(), then call Defragment() with a byte/count budget.
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Accumulated statistics of the whole defragmentation run.
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // An allocation registered for defragmentation, plus an optional output
    // flag to be set when the allocation is actually moved.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by allocation size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block bookkeeping built during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block contains non-movable allocations when it holds more
        // allocations than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE: name contains a historical typo ("Descecnding"); kept as-is
        // because renaming would break any out-of-view callers.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Comparators enabling binary search of BlockInfo* by raw block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moves, limited by the given budget.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    // Runs defragmentation until the budget is exhausted or no more moves
    // make sense.
    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5600 
#if VMA_RECORDING_ENABLED

// Writes a trace of VMA calls to a file (as configured by VmaRecordSettings)
// so that a session can be replayed later. Each Record* method logs one
// corresponding public API call together with the current frame index.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes the header section describing the capture environment.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Common per-call data written with every record entry.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Formats a pUserData value for logging: either the string itself or the
    // pointer value rendered into the local m_PtrStr buffer.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17]; // 16 hex digits + terminating zero.
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;
    // Timer frequency and start value used to compute CallParams::time.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};

#endif // #if VMA_RECORDING_ENABLED
    5704 
// Main allocator object - the definition behind the VmaAllocator handle.
// Owns the default memory pools (one VmaBlockVector per memory type), the
// list of custom pools, dedicated allocations, and cached device properties.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools - one block vector per memory type.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided callbacks, or null to use the system default.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device granularity, clamped up by the debug override if configured.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent memory must additionally respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Low-level wrappers around device memory allocation/free
    // (implementations elsewhere in this file).
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with the given byte pattern
    // (presumably a debug aid - see implementation for conditions).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5906 
    5908 // Memory allocation #2 after VmaAllocator_T definition
    5909 
    5910 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5911 {
    5912  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5913 }
    5914 
    5915 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5916 {
    5917  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5918 }
    5919 
    5920 template<typename T>
    5921 static T* VmaAllocate(VmaAllocator hAllocator)
    5922 {
    5923  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5924 }
    5925 
    5926 template<typename T>
    5927 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5928 {
    5929  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5930 }
    5931 
    5932 template<typename T>
    5933 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5934 {
    5935  if(ptr != VMA_NULL)
    5936  {
    5937  ptr->~T();
    5938  VmaFree(hAllocator, ptr);
    5939  }
    5940 }
    5941 
    5942 template<typename T>
    5943 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5944 {
    5945  if(ptr != VMA_NULL)
    5946  {
    5947  for(size_t i = count; i--; )
    5948  ptr[i].~T();
    5949  VmaFree(hAllocator, ptr);
    5950  }
    5951 }
    5952 
    5954 // VmaStringBuilder
    5955 
    5956 #if VMA_STATS_STRING_ENABLED
    5957 
// Minimal growable text buffer used to build the statistics JSON string.
// NOTE: the buffer is not guaranteed to be null-terminated - consumers must
// use GetLength() together with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5975 
    5976 void VmaStringBuilder::Add(const char* pStr)
    5977 {
    5978  const size_t strLen = strlen(pStr);
    5979  if(strLen > 0)
    5980  {
    5981  const size_t oldCount = m_Data.size();
    5982  m_Data.resize(oldCount + strLen);
    5983  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5984  }
    5985 }
    5986 
    5987 void VmaStringBuilder::AddNumber(uint32_t num)
    5988 {
    5989  char buf[11];
    5990  VmaUint32ToStr(buf, sizeof(buf), num);
    5991  Add(buf);
    5992 }
    5993 
    5994 void VmaStringBuilder::AddNumber(uint64_t num)
    5995 {
    5996  char buf[21];
    5997  VmaUint64ToStr(buf, sizeof(buf), num);
    5998  Add(buf);
    5999 }
    6000 
    6001 void VmaStringBuilder::AddPointer(const void* ptr)
    6002 {
    6003  char buf[21];
    6004  VmaPtrToStr(buf, sizeof(buf), ptr);
    6005  Add(buf);
    6006 }
    6007 
    6008 #endif // #if VMA_STATS_STRING_ENABLED
    6009 
    6011 // VmaJsonWriter
    6012 
    6013 #if VMA_STATS_STRING_ENABLED
    6014 
// Helper that emits syntactically correct JSON into a VmaStringBuilder.
// Usage: nest BeginObject/EndObject and BeginArray/EndArray calls; inside an
// object, values must alternate string-key / value (checked with asserts).
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    // Asserts that all objects/arrays/strings have been properly closed.
    ~VmaJsonWriter();

    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted string in one call. For piecewise construction
    // use BeginString / ContinueString... / EndString instead.
    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // Indentation unit, repeated once per nesting level by WriteIndent().
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object or array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // In an object, an even count means the next value must be a key.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    // Emits separator/indentation before a new value; isString says whether
    // the upcoming value may serve as an object key.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6063 
// Indentation unit used by WriteIndent(), repeated once per nesting level.
const char* const VmaJsonWriter::INDENT = " ";
    6065 
// The writer starts at root scope (empty stack), outside of any string.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6072 
    6073 VmaJsonWriter::~VmaJsonWriter()
    6074 {
    6075  VMA_ASSERT(!m_InsideString);
    6076  VMA_ASSERT(m_Stack.empty());
    6077 }
    6078 
    6079 void VmaJsonWriter::BeginObject(bool singleLine)
    6080 {
    6081  VMA_ASSERT(!m_InsideString);
    6082 
    6083  BeginValue(false);
    6084  m_SB.Add('{');
    6085 
    6086  StackItem item;
    6087  item.type = COLLECTION_TYPE_OBJECT;
    6088  item.valueCount = 0;
    6089  item.singleLineMode = singleLine;
    6090  m_Stack.push_back(item);
    6091 }
    6092 
    6093 void VmaJsonWriter::EndObject()
    6094 {
    6095  VMA_ASSERT(!m_InsideString);
    6096 
    6097  WriteIndent(true);
    6098  m_SB.Add('}');
    6099 
    6100  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6101  m_Stack.pop_back();
    6102 }
    6103 
    6104 void VmaJsonWriter::BeginArray(bool singleLine)
    6105 {
    6106  VMA_ASSERT(!m_InsideString);
    6107 
    6108  BeginValue(false);
    6109  m_SB.Add('[');
    6110 
    6111  StackItem item;
    6112  item.type = COLLECTION_TYPE_ARRAY;
    6113  item.valueCount = 0;
    6114  item.singleLineMode = singleLine;
    6115  m_Stack.push_back(item);
    6116 }
    6117 
    6118 void VmaJsonWriter::EndArray()
    6119 {
    6120  VMA_ASSERT(!m_InsideString);
    6121 
    6122  WriteIndent(true);
    6123  m_SB.Add(']');
    6124 
    6125  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6126  m_Stack.pop_back();
    6127 }
    6128 
    6129 void VmaJsonWriter::WriteString(const char* pStr)
    6130 {
    6131  BeginString(pStr);
    6132  EndString();
    6133 }
    6134 
    6135 void VmaJsonWriter::BeginString(const char* pStr)
    6136 {
    6137  VMA_ASSERT(!m_InsideString);
    6138 
    6139  BeginValue(true);
    6140  m_SB.Add('"');
    6141  m_InsideString = true;
    6142  if(pStr != VMA_NULL && pStr[0] != '\0')
    6143  {
    6144  ContinueString(pStr);
    6145  }
    6146 }
    6147 
    6148 void VmaJsonWriter::ContinueString(const char* pStr)
    6149 {
    6150  VMA_ASSERT(m_InsideString);
    6151 
    6152  const size_t strLen = strlen(pStr);
    6153  for(size_t i = 0; i < strLen; ++i)
    6154  {
    6155  char ch = pStr[i];
    6156  if(ch == '\\')
    6157  {
    6158  m_SB.Add("\\\\");
    6159  }
    6160  else if(ch == '"')
    6161  {
    6162  m_SB.Add("\\\"");
    6163  }
    6164  else if(ch >= 32)
    6165  {
    6166  m_SB.Add(ch);
    6167  }
    6168  else switch(ch)
    6169  {
    6170  case '\b':
    6171  m_SB.Add("\\b");
    6172  break;
    6173  case '\f':
    6174  m_SB.Add("\\f");
    6175  break;
    6176  case '\n':
    6177  m_SB.Add("\\n");
    6178  break;
    6179  case '\r':
    6180  m_SB.Add("\\r");
    6181  break;
    6182  case '\t':
    6183  m_SB.Add("\\t");
    6184  break;
    6185  default:
    6186  VMA_ASSERT(0 && "Character not currently supported.");
    6187  break;
    6188  }
    6189  }
    6190 }
    6191 
    6192 void VmaJsonWriter::ContinueString(uint32_t n)
    6193 {
    6194  VMA_ASSERT(m_InsideString);
    6195  m_SB.AddNumber(n);
    6196 }
    6197 
    6198 void VmaJsonWriter::ContinueString(uint64_t n)
    6199 {
    6200  VMA_ASSERT(m_InsideString);
    6201  m_SB.AddNumber(n);
    6202 }
    6203 
    6204 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    6205 {
    6206  VMA_ASSERT(m_InsideString);
    6207  m_SB.AddPointer(ptr);
    6208 }
    6209 
    6210 void VmaJsonWriter::EndString(const char* pStr)
    6211 {
    6212  VMA_ASSERT(m_InsideString);
    6213  if(pStr != VMA_NULL && pStr[0] != '\0')
    6214  {
    6215  ContinueString(pStr);
    6216  }
    6217  m_SB.Add('"');
    6218  m_InsideString = false;
    6219 }
    6220 
    6221 void VmaJsonWriter::WriteNumber(uint32_t n)
    6222 {
    6223  VMA_ASSERT(!m_InsideString);
    6224  BeginValue(false);
    6225  m_SB.AddNumber(n);
    6226 }
    6227 
    6228 void VmaJsonWriter::WriteNumber(uint64_t n)
    6229 {
    6230  VMA_ASSERT(!m_InsideString);
    6231  BeginValue(false);
    6232  m_SB.AddNumber(n);
    6233 }
    6234 
    6235 void VmaJsonWriter::WriteBool(bool b)
    6236 {
    6237  VMA_ASSERT(!m_InsideString);
    6238  BeginValue(false);
    6239  m_SB.Add(b ? "true" : "false");
    6240 }
    6241 
    6242 void VmaJsonWriter::WriteNull()
    6243 {
    6244  VMA_ASSERT(!m_InsideString);
    6245  BeginValue(false);
    6246  m_SB.Add("null");
    6247 }
    6248 
    6249 void VmaJsonWriter::BeginValue(bool isString)
    6250 {
    6251  if(!m_Stack.empty())
    6252  {
    6253  StackItem& currItem = m_Stack.back();
    6254  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6255  currItem.valueCount % 2 == 0)
    6256  {
    6257  VMA_ASSERT(isString);
    6258  }
    6259 
    6260  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6261  currItem.valueCount % 2 != 0)
    6262  {
    6263  m_SB.Add(": ");
    6264  }
    6265  else if(currItem.valueCount > 0)
    6266  {
    6267  m_SB.Add(", ");
    6268  WriteIndent();
    6269  }
    6270  else
    6271  {
    6272  WriteIndent();
    6273  }
    6274  ++currItem.valueCount;
    6275  }
    6276 }
    6277 
    6278 void VmaJsonWriter::WriteIndent(bool oneLess)
    6279 {
    6280  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6281  {
    6282  m_SB.AddNewLine();
    6283 
    6284  size_t count = m_Stack.size();
    6285  if(count > 0 && oneLess)
    6286  {
    6287  --count;
    6288  }
    6289  for(size_t i = 0; i < count; ++i)
    6290  {
    6291  m_SB.Add(INDENT);
    6292  }
    6293  }
    6294 }
    6295 
    6296 #endif // #if VMA_STATS_STRING_ENABLED
    6297 
    6299 
    6300 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6301 {
    6302  if(IsUserDataString())
    6303  {
    6304  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6305 
    6306  FreeUserDataString(hAllocator);
    6307 
    6308  if(pUserData != VMA_NULL)
    6309  {
    6310  const char* const newStrSrc = (char*)pUserData;
    6311  const size_t newStrLen = strlen(newStrSrc);
    6312  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6313  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6314  m_pUserData = newStrDst;
    6315  }
    6316  }
    6317  else
    6318  {
    6319  m_pUserData = pUserData;
    6320  }
    6321 }
    6322 
    6323 void VmaAllocation_T::ChangeBlockAllocation(
    6324  VmaAllocator hAllocator,
    6325  VmaDeviceMemoryBlock* block,
    6326  VkDeviceSize offset)
    6327 {
    6328  VMA_ASSERT(block != VMA_NULL);
    6329  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    6330 
    6331  // Move mapping reference counter from old block to new block.
    6332  if(block != m_BlockAllocation.m_Block)
    6333  {
    6334  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    6335  if(IsPersistentMap())
    6336  ++mapRefCount;
    6337  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    6338  block->Map(hAllocator, mapRefCount, VMA_NULL);
    6339  }
    6340 
    6341  m_BlockAllocation.m_Block = block;
    6342  m_BlockAllocation.m_Offset = offset;
    6343 }
    6344 
    6345 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
    6346 {
    6347  VMA_ASSERT(newSize > 0);
    6348  m_Size = newSize;
    6349 }
    6350 
    6351 VkDeviceSize VmaAllocation_T::GetOffset() const
    6352 {
    6353  switch(m_Type)
    6354  {
    6355  case ALLOCATION_TYPE_BLOCK:
    6356  return m_BlockAllocation.m_Offset;
    6357  case ALLOCATION_TYPE_DEDICATED:
    6358  return 0;
    6359  default:
    6360  VMA_ASSERT(0);
    6361  return 0;
    6362  }
    6363 }
    6364 
    6365 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6366 {
    6367  switch(m_Type)
    6368  {
    6369  case ALLOCATION_TYPE_BLOCK:
    6370  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6371  case ALLOCATION_TYPE_DEDICATED:
    6372  return m_DedicatedAllocation.m_hMemory;
    6373  default:
    6374  VMA_ASSERT(0);
    6375  return VK_NULL_HANDLE;
    6376  }
    6377 }
    6378 
    6379 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6380 {
    6381  switch(m_Type)
    6382  {
    6383  case ALLOCATION_TYPE_BLOCK:
    6384  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6385  case ALLOCATION_TYPE_DEDICATED:
    6386  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6387  default:
    6388  VMA_ASSERT(0);
    6389  return UINT32_MAX;
    6390  }
    6391 }
    6392 
    6393 void* VmaAllocation_T::GetMappedData() const
    6394 {
    6395  switch(m_Type)
    6396  {
    6397  case ALLOCATION_TYPE_BLOCK:
    6398  if(m_MapCount != 0)
    6399  {
    6400  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6401  VMA_ASSERT(pBlockData != VMA_NULL);
    6402  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6403  }
    6404  else
    6405  {
    6406  return VMA_NULL;
    6407  }
    6408  break;
    6409  case ALLOCATION_TYPE_DEDICATED:
    6410  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6411  return m_DedicatedAllocation.m_pMappedData;
    6412  default:
    6413  VMA_ASSERT(0);
    6414  return VMA_NULL;
    6415  }
    6416 }
    6417 
    6418 bool VmaAllocation_T::CanBecomeLost() const
    6419 {
    6420  switch(m_Type)
    6421  {
    6422  case ALLOCATION_TYPE_BLOCK:
    6423  return m_BlockAllocation.m_CanBecomeLost;
    6424  case ALLOCATION_TYPE_DEDICATED:
    6425  return false;
    6426  default:
    6427  VMA_ASSERT(0);
    6428  return false;
    6429  }
    6430 }
    6431 
// Returns the custom pool this allocation came from.
// Valid only for block allocations; asserts for any other type.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6437 
// Tries to atomically mark this allocation as lost.
// Returns true if it became lost, false if it is still protected by
// frameInUseCount recent frames. Asserts if it was already lost.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Lock-free retry loop over the atomic last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - the caller should not ask again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by an in-flight frame - cannot be lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: another thread touched the index; presumably
            // localLastUseFrameIndex was refreshed with the current value - retry.
        }
    }
}
    6469 
    6470 #if VMA_STATS_STRING_ENABLED
    6471 
    6472 // Correspond to values of enum VmaSuballocationType.
    6473 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    6474  "FREE",
    6475  "UNKNOWN",
    6476  "BUFFER",
    6477  "IMAGE_UNKNOWN",
    6478  "IMAGE_LINEAR",
    6479  "IMAGE_OPTIMAL",
    6480 };
    6481 
// Emits this allocation's properties as key/value pairs into an already-open
// JSON object. The emission order is part of the stable dump format -
// do not reorder the writes.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    // User data is either a heap-owned string or an opaque pointer printed as hex.
    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Usage flags are only recorded for buffer/image allocations that had any.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6517 
    6518 #endif
    6519 
    6520 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6521 {
    6522  VMA_ASSERT(IsUserDataString());
    6523  if(m_pUserData != VMA_NULL)
    6524  {
    6525  char* const oldStr = (char*)m_pUserData;
    6526  const size_t oldStrLen = strlen(oldStr);
    6527  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6528  m_pUserData = VMA_NULL;
    6529  }
    6530 }
    6531 
    6532 void VmaAllocation_T::BlockAllocMap()
    6533 {
    6534  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6535 
    6536  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6537  {
    6538  ++m_MapCount;
    6539  }
    6540  else
    6541  {
    6542  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6543  }
    6544 }
    6545 
    6546 void VmaAllocation_T::BlockAllocUnmap()
    6547 {
    6548  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6549 
    6550  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6551  {
    6552  --m_MapCount;
    6553  }
    6554  else
    6555  {
    6556  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6557  }
    6558 }
    6559 
// Maps a dedicated allocation's memory and returns the pointer in *ppData.
// If already mapped, only bumps the reference count and returns the cached
// pointer; the first map issues the actual vkMapMemory over the whole range.
// Returns VK_ERROR_MEMORY_MAP_FAILED if the reference count would overflow.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped: reuse the cached pointer, subject to the 0x7F ref-count cap.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First map: map the whole VkDeviceMemory through the dispatched function pointer.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
    6596 
// Drops one map reference from a dedicated allocation; the actual
// vkUnmapMemory is issued only when the count reaches zero.
// Unmapping a never-mapped allocation is a programming error.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Last reference gone: clear the cached pointer and unmap for real.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    6617 
    6618 #if VMA_STATS_STRING_ENABLED
    6619 
// Serializes one VmaStatInfo as a JSON object.
// Min/Avg/Max sub-objects are emitted only when there is more than one
// allocation / unused range, since with a single element they carry no
// extra information. Key order is part of the stable dump format.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6667 
    6668 #endif // #if VMA_STATS_STRING_ENABLED
    6669 
// Strict weak ordering of suballocation-list iterators by ascending size.
// The second, heterogeneous overload lets binary search (VmaBinaryFindFirstNotLess)
// compare an entry directly against a plain VkDeviceSize without building an iterator.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6685 
    6686 
    6688 // class VmaBlockMetadata
    6689 
// Base metadata constructor: size starts at 0 (set later via Init());
// the allocator's allocation callbacks are cached for internal containers.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6695 
    6696 #if VMA_STATS_STRING_ENABLED
    6697 
// Opens the per-block JSON object: writes the summary counters, then starts
// the "Suballocations" array. Must be paired with PrintDetailedMap_End().
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    6720 
// Emits one used suballocation as a single-line JSON object:
// its offset plus the allocation's own parameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6734 
// Emits one free range as a single-line JSON object with type "FREE",
// mirroring the shape of PrintDetailedMap_Allocation entries.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6752 
// Closes the "Suballocations" array and the per-block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6758 
    6759 #endif // #if VMA_STATS_STRING_ENABLED
    6760 
    6762 // class VmaBlockMetadata_Generic
    6763 
// Generic metadata constructor: empty suballocation list and by-size index,
// both using the allocator's allocation callbacks. Real state is built in Init().
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6772 
// Nothing to release explicitly; member containers clean up via their own destructors.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6776 
// Initializes the metadata for a freshly created block of the given size:
// a single free suballocation spanning the whole block, registered in the
// by-size index.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // A whole block is always large enough to be indexed by size.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    6796 
// Exhaustive self-check of this block's metadata invariants: contiguous
// offsets, no adjacent free ranges, debug margins honored, the by-size index
// consistent and sorted, and cached totals matching recomputed ones.
// VMA_VALIDATE returns false from this function on the first violation.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // A suballocation is free exactly when it carries no allocation handle.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // A used suballocation must agree with the allocation object it holds.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    // The by-size index must hold only free items, sorted by size ascending.
    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6878 
    6879 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6880 {
    6881  if(!m_FreeSuballocationsBySize.empty())
    6882  {
    6883  return m_FreeSuballocationsBySize.back()->size;
    6884  }
    6885  else
    6886  {
    6887  return 0;
    6888  }
    6889 }
    6890 
// The block is empty when it consists of exactly one suballocation and that one is free.
bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}
    6895 
    6896 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6897 {
    6898  outInfo.blockCount = 1;
    6899 
    6900  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6901  outInfo.allocationCount = rangeCount - m_FreeCount;
    6902  outInfo.unusedRangeCount = m_FreeCount;
    6903 
    6904  outInfo.unusedBytes = m_SumFreeSize;
    6905  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6906 
    6907  outInfo.allocationSizeMin = UINT64_MAX;
    6908  outInfo.allocationSizeMax = 0;
    6909  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6910  outInfo.unusedRangeSizeMax = 0;
    6911 
    6912  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6913  suballocItem != m_Suballocations.cend();
    6914  ++suballocItem)
    6915  {
    6916  const VmaSuballocation& suballoc = *suballocItem;
    6917  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6918  {
    6919  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6920  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6921  }
    6922  else
    6923  {
    6924  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6925  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6926  }
    6927  }
    6928 }
    6929 
    6930 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6931 {
    6932  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6933 
    6934  inoutStats.size += GetSize();
    6935  inoutStats.unusedSize += m_SumFreeSize;
    6936  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6937  inoutStats.unusedRangeCount += m_FreeCount;
    6938  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6939 }
    6940 
    6941 #if VMA_STATS_STRING_ENABLED
    6942 
    6943 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6944 {
    6945  PrintDetailedMap_Begin(json,
    6946  m_SumFreeSize, // unusedBytes
    6947  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6948  m_FreeCount); // unusedRangeCount
    6949 
    6950  size_t i = 0;
    6951  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6952  suballocItem != m_Suballocations.cend();
    6953  ++suballocItem, ++i)
    6954  {
    6955  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6956  {
    6957  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6958  }
    6959  else
    6960  {
    6961  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6962  }
    6963  }
    6964 
    6965  PrintDetailedMap_End(json);
    6966 }
    6967 
    6968 #endif // #if VMA_STATS_STRING_ENABLED
    6969 
    6970 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6971  uint32_t currentFrameIndex,
    6972  uint32_t frameInUseCount,
    6973  VkDeviceSize bufferImageGranularity,
    6974  VkDeviceSize allocSize,
    6975  VkDeviceSize allocAlignment,
    6976  bool upperAddress,
    6977  VmaSuballocationType allocType,
    6978  bool canMakeOtherLost,
    6979  uint32_t strategy,
    6980  VmaAllocationRequest* pAllocationRequest)
    6981 {
    6982  VMA_ASSERT(allocSize > 0);
    6983  VMA_ASSERT(!upperAddress);
    6984  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6985  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6986  VMA_HEAVY_ASSERT(Validate());
    6987 
    6988  // There is not enough total free space in this block to fullfill the request: Early return.
    6989  if(canMakeOtherLost == false &&
    6990  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6991  {
    6992  return false;
    6993  }
    6994 
    6995  // New algorithm, efficiently searching freeSuballocationsBySize.
    6996  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6997  if(freeSuballocCount > 0)
    6998  {
    7000  {
    7001  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7002  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7003  m_FreeSuballocationsBySize.data(),
    7004  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7005  allocSize + 2 * VMA_DEBUG_MARGIN,
    7006  VmaSuballocationItemSizeLess());
    7007  size_t index = it - m_FreeSuballocationsBySize.data();
    7008  for(; index < freeSuballocCount; ++index)
    7009  {
    7010  if(CheckAllocation(
    7011  currentFrameIndex,
    7012  frameInUseCount,
    7013  bufferImageGranularity,
    7014  allocSize,
    7015  allocAlignment,
    7016  allocType,
    7017  m_FreeSuballocationsBySize[index],
    7018  false, // canMakeOtherLost
    7019  &pAllocationRequest->offset,
    7020  &pAllocationRequest->itemsToMakeLostCount,
    7021  &pAllocationRequest->sumFreeSize,
    7022  &pAllocationRequest->sumItemSize))
    7023  {
    7024  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7025  return true;
    7026  }
    7027  }
    7028  }
    7029  else // WORST_FIT, FIRST_FIT
    7030  {
    7031  // Search staring from biggest suballocations.
    7032  for(size_t index = freeSuballocCount; index--; )
    7033  {
    7034  if(CheckAllocation(
    7035  currentFrameIndex,
    7036  frameInUseCount,
    7037  bufferImageGranularity,
    7038  allocSize,
    7039  allocAlignment,
    7040  allocType,
    7041  m_FreeSuballocationsBySize[index],
    7042  false, // canMakeOtherLost
    7043  &pAllocationRequest->offset,
    7044  &pAllocationRequest->itemsToMakeLostCount,
    7045  &pAllocationRequest->sumFreeSize,
    7046  &pAllocationRequest->sumItemSize))
    7047  {
    7048  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7049  return true;
    7050  }
    7051  }
    7052  }
    7053  }
    7054 
    7055  if(canMakeOtherLost)
    7056  {
    7057  // Brute-force algorithm. TODO: Come up with something better.
    7058 
    7059  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7060  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7061 
    7062  VmaAllocationRequest tmpAllocRequest = {};
    7063  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7064  suballocIt != m_Suballocations.end();
    7065  ++suballocIt)
    7066  {
    7067  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7068  suballocIt->hAllocation->CanBecomeLost())
    7069  {
    7070  if(CheckAllocation(
    7071  currentFrameIndex,
    7072  frameInUseCount,
    7073  bufferImageGranularity,
    7074  allocSize,
    7075  allocAlignment,
    7076  allocType,
    7077  suballocIt,
    7078  canMakeOtherLost,
    7079  &tmpAllocRequest.offset,
    7080  &tmpAllocRequest.itemsToMakeLostCount,
    7081  &tmpAllocRequest.sumFreeSize,
    7082  &tmpAllocRequest.sumItemSize))
    7083  {
    7084  tmpAllocRequest.item = suballocIt;
    7085 
    7086  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7088  {
    7089  *pAllocationRequest = tmpAllocRequest;
    7090  }
    7091  }
    7092  }
    7093  }
    7094 
    7095  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7096  {
    7097  return true;
    7098  }
    7099  }
    7100 
    7101  return false;
    7102 }
    7103 
// Marks lost the allocations that CreateAllocationRequest() counted in
// itemsToMakeLostCount, freeing their ranges as it goes. Returns false if any
// of them can no longer be lost; pAllocationRequest->item ends on the free
// range to use for the new allocation.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over a free item to reach the next allocation to sacrifice.
        // NOTE(review): the single `if` assumes free items never appear twice in
        // a row (adjacent free ranges are presumably merged by FreeSuballocation)
        // - confirm.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation returns a valid iterator to the resulting free range.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            // The allocation was used again recently and can no longer be lost.
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7135 
// Walks the whole block and makes lost every allocation that is eligible
// (lost-capable and older than frameInUseCount frames). Returns how many
// allocations were lost.
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge neighbors; continue from the iterator it returns.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
    7153 
    7154 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7155 {
    7156  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7157  it != m_Suballocations.end();
    7158  ++it)
    7159  {
    7160  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7161  {
    7162  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7163  {
    7164  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7165  return VK_ERROR_VALIDATION_FAILED_EXT;
    7166  }
    7167  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7168  {
    7169  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7170  return VK_ERROR_VALIDATION_FAILED_EXT;
    7171  }
    7172  }
    7173  }
    7174 
    7175  return VK_SUCCESS;
    7176 }
    7177 
// Commits a previously computed allocation request: converts the chosen free
// suballocation into a used one and inserts new free suballocations for any
// leftover space before (alignment padding) and after it. Updates m_FreeCount
// and m_SumFreeSize accordingly.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range consumed, plus one re-created per non-empty padding.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7243 
    7244 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7245 {
    7246  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7247  suballocItem != m_Suballocations.end();
    7248  ++suballocItem)
    7249  {
    7250  VmaSuballocation& suballoc = *suballocItem;
    7251  if(suballoc.hAllocation == allocation)
    7252  {
    7253  FreeSuballocation(suballocItem);
    7254  VMA_HEAVY_ASSERT(Validate());
    7255  return;
    7256  }
    7257  }
    7258  VMA_ASSERT(0 && "Not found!");
    7259 }
    7260 
    7261 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7262 {
    7263  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7264  suballocItem != m_Suballocations.end();
    7265  ++suballocItem)
    7266  {
    7267  VmaSuballocation& suballoc = *suballocItem;
    7268  if(suballoc.offset == offset)
    7269  {
    7270  FreeSuballocation(suballocItem);
    7271  return;
    7272  }
    7273  }
    7274  VMA_ASSERT(0 && "Not found!");
    7275 }
    7276 
// Tries to change the size of the given allocation in place, without moving it.
// Shrinking always succeeds: the freed tail becomes (or merges into) a free
// suballocation. Growing succeeds only when the directly following
// suballocation is free and large enough, including VMA_DEBUG_MARGIN.
// Returns false when the allocation is not found in this block or cannot be
// grown in place. Found by linear search over m_Suballocations.
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister/register pair is required because changing the
                        // item's size changes its position in the size-sorted free list.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        // With VMA_DEBUG_MARGIN > 0 this branch is always taken
                        // (the margin check above guarantees size > sizeDiff).
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    7403 
    7404 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7405 {
    7406  VkDeviceSize lastSize = 0;
    7407  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7408  {
    7409  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7410 
    7411  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7412  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7413  VMA_VALIDATE(it->size >= lastSize);
    7414  lastSize = it->size;
    7415  }
    7416  return true;
    7417 }
    7418 
// Checks whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting at suballocItem.
//
// Two modes:
// - canMakeOtherLost == true: suballocItem may be used (free) or its current
//   allocation (and following ones) may be marked lost if they can become lost
//   given currentFrameIndex/frameInUseCount. *itemsToMakeLostCount,
//   *pSumItemSize and *pSumFreeSize report what would be consumed.
// - canMakeOtherLost == false: suballocItem must be a single free
//   suballocation large enough by itself.
//
// In both modes VMA_DEBUG_MARGIN is applied before and after the allocation,
// and bufferImageGranularity conflicts with neighbors are checked per the
// Vulkan rules (VmaBlocksOnSamePage / VmaIsBufferImageGranularityConflict).
// On success returns true and *pOffset holds the final, aligned offset.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // Account for the starting suballocation: either free space, or an
        // allocation that would have to be made lost.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // Occupied item must be losable, otherwise the range cannot be used.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple mode: the starting suballocation must be free and sufficient
        // on its own.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7692 
    7693 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7694 {
    7695  VMA_ASSERT(item != m_Suballocations.end());
    7696  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7697 
    7698  VmaSuballocationList::iterator nextItem = item;
    7699  ++nextItem;
    7700  VMA_ASSERT(nextItem != m_Suballocations.end());
    7701  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7702 
    7703  item->size += nextItem->size;
    7704  --m_FreeCount;
    7705  m_Suballocations.erase(nextItem);
    7706 }
    7707 
    7708 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    7709 {
    7710  // Change this suballocation to be marked as free.
    7711  VmaSuballocation& suballoc = *suballocItem;
    7712  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7713  suballoc.hAllocation = VK_NULL_HANDLE;
    7714 
    7715  // Update totals.
    7716  ++m_FreeCount;
    7717  m_SumFreeSize += suballoc.size;
    7718 
    7719  // Merge with previous and/or next suballocation if it's also free.
    7720  bool mergeWithNext = false;
    7721  bool mergeWithPrev = false;
    7722 
    7723  VmaSuballocationList::iterator nextItem = suballocItem;
    7724  ++nextItem;
    7725  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    7726  {
    7727  mergeWithNext = true;
    7728  }
    7729 
    7730  VmaSuballocationList::iterator prevItem = suballocItem;
    7731  if(suballocItem != m_Suballocations.begin())
    7732  {
    7733  --prevItem;
    7734  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7735  {
    7736  mergeWithPrev = true;
    7737  }
    7738  }
    7739 
    7740  if(mergeWithNext)
    7741  {
    7742  UnregisterFreeSuballocation(nextItem);
    7743  MergeFreeWithNext(suballocItem);
    7744  }
    7745 
    7746  if(mergeWithPrev)
    7747  {
    7748  UnregisterFreeSuballocation(prevItem);
    7749  MergeFreeWithNext(prevItem);
    7750  RegisterFreeSuballocation(prevItem);
    7751  return prevItem;
    7752  }
    7753  else
    7754  {
    7755  RegisterFreeSuballocation(suballocItem);
    7756  return suballocItem;
    7757  }
    7758 }
    7759 
    7760 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7761 {
    7762  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7763  VMA_ASSERT(item->size > 0);
    7764 
    7765  // You may want to enable this validation at the beginning or at the end of
    7766  // this function, depending on what do you want to check.
    7767  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7768 
    7769  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7770  {
    7771  if(m_FreeSuballocationsBySize.empty())
    7772  {
    7773  m_FreeSuballocationsBySize.push_back(item);
    7774  }
    7775  else
    7776  {
    7777  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7778  }
    7779  }
    7780 
    7781  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7782 }
    7783 
    7784 
    7785 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    7786 {
    7787  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7788  VMA_ASSERT(item->size > 0);
    7789 
    7790  // You may want to enable this validation at the beginning or at the end of
    7791  // this function, depending on what do you want to check.
    7792  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7793 
    7794  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7795  {
    7796  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7797  m_FreeSuballocationsBySize.data(),
    7798  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    7799  item,
    7800  VmaSuballocationItemSizeLess());
    7801  for(size_t index = it - m_FreeSuballocationsBySize.data();
    7802  index < m_FreeSuballocationsBySize.size();
    7803  ++index)
    7804  {
    7805  if(m_FreeSuballocationsBySize[index] == item)
    7806  {
    7807  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    7808  return;
    7809  }
    7810  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    7811  }
    7812  VMA_ASSERT(0 && "Not found.");
    7813  }
    7814 
    7815  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7816 }
    7817 
    7819 // class VmaBlockMetadata_Linear
    7820 
// Linear block metadata. Keeps two suballocation vectors (m_Suballocations0/1,
// with m_1stVectorIndex presumably selecting which one acts as "1st" - see
// AccessSuballocations1st/2nd); m_2ndVectorMode controls whether the 2nd
// vector is unused, a ring buffer, or a second stack.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    // Counts of freed (null) items kept inside the vectors instead of erased.
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7833 
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
    // Nothing to do: the suballocation vectors release their own storage.
}
    7837 
// Initializes metadata for a block of the given size.
// Initially the whole block is free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7843 
// Validates internal consistency of the linear metadata: mode/vector
// agreement, null-item bookkeeping, monotonically increasing offsets with
// VMA_DEBUG_MARGIN gaps, and that m_SumFreeSize matches block size minus the
// sum of live allocation sizes. Returns true when everything checks out
// (VMA_VALIDATE returns false from this function otherwise).
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is non-empty exactly when a second-vector mode is active.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // In ring-buffer mode a non-empty 2nd vector requires a non-empty 1st.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    // Null-item counters can never exceed the vectors' sizes.
    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Running minimum offset; each item must start at or after it.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // Ring buffer: 2nd vector occupies the region before the 1st, so walk it first.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // Free flag and null allocation handle must agree.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // Metadata must agree with the allocation object.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The leading run of the 1st vector must consist only of null items.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    // Walk the remaining 1st-vector items.
    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // Double stack: 2nd vector grows downward from the end, so iterate it in
    // reverse to keep offsets increasing.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7970 
    7971 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7972 {
    7973  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7974  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7975 }
    7976 
    7977 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7978 {
    7979  const VkDeviceSize size = GetSize();
    7980 
    7981  /*
    7982  We don't consider gaps inside allocation vectors with freed allocations because
    7983  they are not suitable for reuse in linear allocator. We consider only space that
    7984  is available for new allocations.
    7985  */
    7986  if(IsEmpty())
    7987  {
    7988  return size;
    7989  }
    7990 
    7991  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7992 
    7993  switch(m_2ndVectorMode)
    7994  {
    7995  case SECOND_VECTOR_EMPTY:
    7996  /*
    7997  Available space is after end of 1st, as well as before beginning of 1st (which
    7998  whould make it a ring buffer).
    7999  */
    8000  {
    8001  const size_t suballocations1stCount = suballocations1st.size();
    8002  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8003  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8004  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8005  return VMA_MAX(
    8006  firstSuballoc.offset,
    8007  size - (lastSuballoc.offset + lastSuballoc.size));
    8008  }
    8009  break;
    8010 
    8011  case SECOND_VECTOR_RING_BUFFER:
    8012  /*
    8013  Available space is only between end of 2nd and beginning of 1st.
    8014  */
    8015  {
    8016  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8017  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8018  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8019  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8020  }
    8021  break;
    8022 
    8023  case SECOND_VECTOR_DOUBLE_STACK:
    8024  /*
    8025  Available space is only between end of 1st and top of 2nd.
    8026  */
    8027  {
    8028  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8029  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8030  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8031  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8032  }
    8033  break;
    8034 
    8035  default:
    8036  VMA_ASSERT(0);
    8037  return 0;
    8038  }
    8039 }
    8040 
    8041 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8042 {
    8043  const VkDeviceSize size = GetSize();
    8044  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8045  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8046  const size_t suballoc1stCount = suballocations1st.size();
    8047  const size_t suballoc2ndCount = suballocations2nd.size();
    8048 
    8049  outInfo.blockCount = 1;
    8050  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8051  outInfo.unusedRangeCount = 0;
    8052  outInfo.usedBytes = 0;
    8053  outInfo.allocationSizeMin = UINT64_MAX;
    8054  outInfo.allocationSizeMax = 0;
    8055  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8056  outInfo.unusedRangeSizeMax = 0;
    8057 
    8058  VkDeviceSize lastOffset = 0;
    8059 
    8060  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8061  {
    8062  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8063  size_t nextAlloc2ndIndex = 0;
    8064  while(lastOffset < freeSpace2ndTo1stEnd)
    8065  {
    8066  // Find next non-null allocation or move nextAllocIndex to the end.
    8067  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8068  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8069  {
    8070  ++nextAlloc2ndIndex;
    8071  }
    8072 
    8073  // Found non-null allocation.
    8074  if(nextAlloc2ndIndex < suballoc2ndCount)
    8075  {
    8076  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8077 
    8078  // 1. Process free space before this allocation.
    8079  if(lastOffset < suballoc.offset)
    8080  {
    8081  // There is free space from lastOffset to suballoc.offset.
    8082  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8083  ++outInfo.unusedRangeCount;
    8084  outInfo.unusedBytes += unusedRangeSize;
    8085  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8086  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8087  }
    8088 
    8089  // 2. Process this allocation.
    8090  // There is allocation with suballoc.offset, suballoc.size.
    8091  outInfo.usedBytes += suballoc.size;
    8092  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8093  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8094 
    8095  // 3. Prepare for next iteration.
    8096  lastOffset = suballoc.offset + suballoc.size;
    8097  ++nextAlloc2ndIndex;
    8098  }
    8099  // We are at the end.
    8100  else
    8101  {
    8102  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8103  if(lastOffset < freeSpace2ndTo1stEnd)
    8104  {
    8105  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8106  ++outInfo.unusedRangeCount;
    8107  outInfo.unusedBytes += unusedRangeSize;
    8108  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8109  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8110  }
    8111 
    8112  // End of loop.
    8113  lastOffset = freeSpace2ndTo1stEnd;
    8114  }
    8115  }
    8116  }
    8117 
    8118  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8119  const VkDeviceSize freeSpace1stTo2ndEnd =
    8120  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8121  while(lastOffset < freeSpace1stTo2ndEnd)
    8122  {
    8123  // Find next non-null allocation or move nextAllocIndex to the end.
    8124  while(nextAlloc1stIndex < suballoc1stCount &&
    8125  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8126  {
    8127  ++nextAlloc1stIndex;
    8128  }
    8129 
    8130  // Found non-null allocation.
    8131  if(nextAlloc1stIndex < suballoc1stCount)
    8132  {
    8133  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8134 
    8135  // 1. Process free space before this allocation.
    8136  if(lastOffset < suballoc.offset)
    8137  {
    8138  // There is free space from lastOffset to suballoc.offset.
    8139  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8140  ++outInfo.unusedRangeCount;
    8141  outInfo.unusedBytes += unusedRangeSize;
    8142  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8143  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8144  }
    8145 
    8146  // 2. Process this allocation.
    8147  // There is allocation with suballoc.offset, suballoc.size.
    8148  outInfo.usedBytes += suballoc.size;
    8149  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8150  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8151 
    8152  // 3. Prepare for next iteration.
    8153  lastOffset = suballoc.offset + suballoc.size;
    8154  ++nextAlloc1stIndex;
    8155  }
    8156  // We are at the end.
    8157  else
    8158  {
    8159  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8160  if(lastOffset < freeSpace1stTo2ndEnd)
    8161  {
    8162  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8163  ++outInfo.unusedRangeCount;
    8164  outInfo.unusedBytes += unusedRangeSize;
    8165  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8166  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8167  }
    8168 
    8169  // End of loop.
    8170  lastOffset = freeSpace1stTo2ndEnd;
    8171  }
    8172  }
    8173 
    8174  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8175  {
    8176  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8177  while(lastOffset < size)
    8178  {
    8179  // Find next non-null allocation or move nextAllocIndex to the end.
    8180  while(nextAlloc2ndIndex != SIZE_MAX &&
    8181  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8182  {
    8183  --nextAlloc2ndIndex;
    8184  }
    8185 
    8186  // Found non-null allocation.
    8187  if(nextAlloc2ndIndex != SIZE_MAX)
    8188  {
    8189  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8190 
    8191  // 1. Process free space before this allocation.
    8192  if(lastOffset < suballoc.offset)
    8193  {
    8194  // There is free space from lastOffset to suballoc.offset.
    8195  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8196  ++outInfo.unusedRangeCount;
    8197  outInfo.unusedBytes += unusedRangeSize;
    8198  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8199  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8200  }
    8201 
    8202  // 2. Process this allocation.
    8203  // There is allocation with suballoc.offset, suballoc.size.
    8204  outInfo.usedBytes += suballoc.size;
    8205  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8206  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8207 
    8208  // 3. Prepare for next iteration.
    8209  lastOffset = suballoc.offset + suballoc.size;
    8210  --nextAlloc2ndIndex;
    8211  }
    8212  // We are at the end.
    8213  else
    8214  {
    8215  // There is free space from lastOffset to size.
    8216  if(lastOffset < size)
    8217  {
    8218  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8219  ++outInfo.unusedRangeCount;
    8220  outInfo.unusedBytes += unusedRangeSize;
    8221  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8222  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8223  }
    8224 
    8225  // End of loop.
    8226  lastOffset = size;
    8227  }
    8228  }
    8229  }
    8230 
    8231  outInfo.unusedBytes = size - outInfo.usedBytes;
    8232 }
    8233 
    8234 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8235 {
    8236  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8237  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8238  const VkDeviceSize size = GetSize();
    8239  const size_t suballoc1stCount = suballocations1st.size();
    8240  const size_t suballoc2ndCount = suballocations2nd.size();
    8241 
    8242  inoutStats.size += size;
    8243 
    8244  VkDeviceSize lastOffset = 0;
    8245 
    8246  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8247  {
    8248  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8249  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8250  while(lastOffset < freeSpace2ndTo1stEnd)
    8251  {
    8252  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8253  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8254  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8255  {
    8256  ++nextAlloc2ndIndex;
    8257  }
    8258 
    8259  // Found non-null allocation.
    8260  if(nextAlloc2ndIndex < suballoc2ndCount)
    8261  {
    8262  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8263 
    8264  // 1. Process free space before this allocation.
    8265  if(lastOffset < suballoc.offset)
    8266  {
    8267  // There is free space from lastOffset to suballoc.offset.
    8268  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8269  inoutStats.unusedSize += unusedRangeSize;
    8270  ++inoutStats.unusedRangeCount;
    8271  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8272  }
    8273 
    8274  // 2. Process this allocation.
    8275  // There is allocation with suballoc.offset, suballoc.size.
    8276  ++inoutStats.allocationCount;
    8277 
    8278  // 3. Prepare for next iteration.
    8279  lastOffset = suballoc.offset + suballoc.size;
    8280  ++nextAlloc2ndIndex;
    8281  }
    8282  // We are at the end.
    8283  else
    8284  {
    8285  if(lastOffset < freeSpace2ndTo1stEnd)
    8286  {
    8287  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8288  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8289  inoutStats.unusedSize += unusedRangeSize;
    8290  ++inoutStats.unusedRangeCount;
    8291  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8292  }
    8293 
    8294  // End of loop.
    8295  lastOffset = freeSpace2ndTo1stEnd;
    8296  }
    8297  }
    8298  }
    8299 
    8300  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8301  const VkDeviceSize freeSpace1stTo2ndEnd =
    8302  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8303  while(lastOffset < freeSpace1stTo2ndEnd)
    8304  {
    8305  // Find next non-null allocation or move nextAllocIndex to the end.
    8306  while(nextAlloc1stIndex < suballoc1stCount &&
    8307  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8308  {
    8309  ++nextAlloc1stIndex;
    8310  }
    8311 
    8312  // Found non-null allocation.
    8313  if(nextAlloc1stIndex < suballoc1stCount)
    8314  {
    8315  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8316 
    8317  // 1. Process free space before this allocation.
    8318  if(lastOffset < suballoc.offset)
    8319  {
    8320  // There is free space from lastOffset to suballoc.offset.
    8321  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8322  inoutStats.unusedSize += unusedRangeSize;
    8323  ++inoutStats.unusedRangeCount;
    8324  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8325  }
    8326 
    8327  // 2. Process this allocation.
    8328  // There is allocation with suballoc.offset, suballoc.size.
    8329  ++inoutStats.allocationCount;
    8330 
    8331  // 3. Prepare for next iteration.
    8332  lastOffset = suballoc.offset + suballoc.size;
    8333  ++nextAlloc1stIndex;
    8334  }
    8335  // We are at the end.
    8336  else
    8337  {
    8338  if(lastOffset < freeSpace1stTo2ndEnd)
    8339  {
    8340  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8341  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8342  inoutStats.unusedSize += unusedRangeSize;
    8343  ++inoutStats.unusedRangeCount;
    8344  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8345  }
    8346 
    8347  // End of loop.
    8348  lastOffset = freeSpace1stTo2ndEnd;
    8349  }
    8350  }
    8351 
    8352  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8353  {
    8354  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8355  while(lastOffset < size)
    8356  {
    8357  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8358  while(nextAlloc2ndIndex != SIZE_MAX &&
    8359  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8360  {
    8361  --nextAlloc2ndIndex;
    8362  }
    8363 
    8364  // Found non-null allocation.
    8365  if(nextAlloc2ndIndex != SIZE_MAX)
    8366  {
    8367  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8368 
    8369  // 1. Process free space before this allocation.
    8370  if(lastOffset < suballoc.offset)
    8371  {
    8372  // There is free space from lastOffset to suballoc.offset.
    8373  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8374  inoutStats.unusedSize += unusedRangeSize;
    8375  ++inoutStats.unusedRangeCount;
    8376  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8377  }
    8378 
    8379  // 2. Process this allocation.
    8380  // There is allocation with suballoc.offset, suballoc.size.
    8381  ++inoutStats.allocationCount;
    8382 
    8383  // 3. Prepare for next iteration.
    8384  lastOffset = suballoc.offset + suballoc.size;
    8385  --nextAlloc2ndIndex;
    8386  }
    8387  // We are at the end.
    8388  else
    8389  {
    8390  if(lastOffset < size)
    8391  {
    8392  // There is free space from lastOffset to size.
    8393  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8394  inoutStats.unusedSize += unusedRangeSize;
    8395  ++inoutStats.unusedRangeCount;
    8396  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8397  }
    8398 
    8399  // End of loop.
    8400  lastOffset = size;
    8401  }
    8402  }
    8403  }
    8404 }
    8405 
    8406 #if VMA_STATS_STRING_ENABLED
    8407 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8408 {
    8409  const VkDeviceSize size = GetSize();
    8410  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8411  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8412  const size_t suballoc1stCount = suballocations1st.size();
    8413  const size_t suballoc2ndCount = suballocations2nd.size();
    8414 
    8415  // FIRST PASS
    8416 
    8417  size_t unusedRangeCount = 0;
    8418  VkDeviceSize usedBytes = 0;
    8419 
    8420  VkDeviceSize lastOffset = 0;
    8421 
    8422  size_t alloc2ndCount = 0;
    8423  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8424  {
    8425  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8426  size_t nextAlloc2ndIndex = 0;
    8427  while(lastOffset < freeSpace2ndTo1stEnd)
    8428  {
    8429  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8430  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8431  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8432  {
    8433  ++nextAlloc2ndIndex;
    8434  }
    8435 
    8436  // Found non-null allocation.
    8437  if(nextAlloc2ndIndex < suballoc2ndCount)
    8438  {
    8439  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8440 
    8441  // 1. Process free space before this allocation.
    8442  if(lastOffset < suballoc.offset)
    8443  {
    8444  // There is free space from lastOffset to suballoc.offset.
    8445  ++unusedRangeCount;
    8446  }
    8447 
    8448  // 2. Process this allocation.
    8449  // There is allocation with suballoc.offset, suballoc.size.
    8450  ++alloc2ndCount;
    8451  usedBytes += suballoc.size;
    8452 
    8453  // 3. Prepare for next iteration.
    8454  lastOffset = suballoc.offset + suballoc.size;
    8455  ++nextAlloc2ndIndex;
    8456  }
    8457  // We are at the end.
    8458  else
    8459  {
    8460  if(lastOffset < freeSpace2ndTo1stEnd)
    8461  {
    8462  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8463  ++unusedRangeCount;
    8464  }
    8465 
    8466  // End of loop.
    8467  lastOffset = freeSpace2ndTo1stEnd;
    8468  }
    8469  }
    8470  }
    8471 
    8472  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8473  size_t alloc1stCount = 0;
    8474  const VkDeviceSize freeSpace1stTo2ndEnd =
    8475  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8476  while(lastOffset < freeSpace1stTo2ndEnd)
    8477  {
    8478  // Find next non-null allocation or move nextAllocIndex to the end.
    8479  while(nextAlloc1stIndex < suballoc1stCount &&
    8480  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8481  {
    8482  ++nextAlloc1stIndex;
    8483  }
    8484 
    8485  // Found non-null allocation.
    8486  if(nextAlloc1stIndex < suballoc1stCount)
    8487  {
    8488  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8489 
    8490  // 1. Process free space before this allocation.
    8491  if(lastOffset < suballoc.offset)
    8492  {
    8493  // There is free space from lastOffset to suballoc.offset.
    8494  ++unusedRangeCount;
    8495  }
    8496 
    8497  // 2. Process this allocation.
    8498  // There is allocation with suballoc.offset, suballoc.size.
    8499  ++alloc1stCount;
    8500  usedBytes += suballoc.size;
    8501 
    8502  // 3. Prepare for next iteration.
    8503  lastOffset = suballoc.offset + suballoc.size;
    8504  ++nextAlloc1stIndex;
    8505  }
    8506  // We are at the end.
    8507  else
    8508  {
    8509  if(lastOffset < size)
    8510  {
    8511  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8512  ++unusedRangeCount;
    8513  }
    8514 
    8515  // End of loop.
    8516  lastOffset = freeSpace1stTo2ndEnd;
    8517  }
    8518  }
    8519 
    8520  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8521  {
    8522  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8523  while(lastOffset < size)
    8524  {
    8525  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8526  while(nextAlloc2ndIndex != SIZE_MAX &&
    8527  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8528  {
    8529  --nextAlloc2ndIndex;
    8530  }
    8531 
    8532  // Found non-null allocation.
    8533  if(nextAlloc2ndIndex != SIZE_MAX)
    8534  {
    8535  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8536 
    8537  // 1. Process free space before this allocation.
    8538  if(lastOffset < suballoc.offset)
    8539  {
    8540  // There is free space from lastOffset to suballoc.offset.
    8541  ++unusedRangeCount;
    8542  }
    8543 
    8544  // 2. Process this allocation.
    8545  // There is allocation with suballoc.offset, suballoc.size.
    8546  ++alloc2ndCount;
    8547  usedBytes += suballoc.size;
    8548 
    8549  // 3. Prepare for next iteration.
    8550  lastOffset = suballoc.offset + suballoc.size;
    8551  --nextAlloc2ndIndex;
    8552  }
    8553  // We are at the end.
    8554  else
    8555  {
    8556  if(lastOffset < size)
    8557  {
    8558  // There is free space from lastOffset to size.
    8559  ++unusedRangeCount;
    8560  }
    8561 
    8562  // End of loop.
    8563  lastOffset = size;
    8564  }
    8565  }
    8566  }
    8567 
    8568  const VkDeviceSize unusedBytes = size - usedBytes;
    8569  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8570 
    8571  // SECOND PASS
    8572  lastOffset = 0;
    8573 
    8574  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8575  {
    8576  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8577  size_t nextAlloc2ndIndex = 0;
    8578  while(lastOffset < freeSpace2ndTo1stEnd)
    8579  {
    8580  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8581  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8582  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8583  {
    8584  ++nextAlloc2ndIndex;
    8585  }
    8586 
    8587  // Found non-null allocation.
    8588  if(nextAlloc2ndIndex < suballoc2ndCount)
    8589  {
    8590  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8591 
    8592  // 1. Process free space before this allocation.
    8593  if(lastOffset < suballoc.offset)
    8594  {
    8595  // There is free space from lastOffset to suballoc.offset.
    8596  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8597  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8598  }
    8599 
    8600  // 2. Process this allocation.
    8601  // There is allocation with suballoc.offset, suballoc.size.
    8602  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8603 
    8604  // 3. Prepare for next iteration.
    8605  lastOffset = suballoc.offset + suballoc.size;
    8606  ++nextAlloc2ndIndex;
    8607  }
    8608  // We are at the end.
    8609  else
    8610  {
    8611  if(lastOffset < freeSpace2ndTo1stEnd)
    8612  {
    8613  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8614  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8615  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8616  }
    8617 
    8618  // End of loop.
    8619  lastOffset = freeSpace2ndTo1stEnd;
    8620  }
    8621  }
    8622  }
    8623 
    8624  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8625  while(lastOffset < freeSpace1stTo2ndEnd)
    8626  {
    8627  // Find next non-null allocation or move nextAllocIndex to the end.
    8628  while(nextAlloc1stIndex < suballoc1stCount &&
    8629  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8630  {
    8631  ++nextAlloc1stIndex;
    8632  }
    8633 
    8634  // Found non-null allocation.
    8635  if(nextAlloc1stIndex < suballoc1stCount)
    8636  {
    8637  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8638 
    8639  // 1. Process free space before this allocation.
    8640  if(lastOffset < suballoc.offset)
    8641  {
    8642  // There is free space from lastOffset to suballoc.offset.
    8643  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8644  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8645  }
    8646 
    8647  // 2. Process this allocation.
    8648  // There is allocation with suballoc.offset, suballoc.size.
    8649  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8650 
    8651  // 3. Prepare for next iteration.
    8652  lastOffset = suballoc.offset + suballoc.size;
    8653  ++nextAlloc1stIndex;
    8654  }
    8655  // We are at the end.
    8656  else
    8657  {
    8658  if(lastOffset < freeSpace1stTo2ndEnd)
    8659  {
    8660  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8661  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8662  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8663  }
    8664 
    8665  // End of loop.
    8666  lastOffset = freeSpace1stTo2ndEnd;
    8667  }
    8668  }
    8669 
    8670  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8671  {
    8672  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8673  while(lastOffset < size)
    8674  {
    8675  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8676  while(nextAlloc2ndIndex != SIZE_MAX &&
    8677  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8678  {
    8679  --nextAlloc2ndIndex;
    8680  }
    8681 
    8682  // Found non-null allocation.
    8683  if(nextAlloc2ndIndex != SIZE_MAX)
    8684  {
    8685  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8686 
    8687  // 1. Process free space before this allocation.
    8688  if(lastOffset < suballoc.offset)
    8689  {
    8690  // There is free space from lastOffset to suballoc.offset.
    8691  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8692  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8693  }
    8694 
    8695  // 2. Process this allocation.
    8696  // There is allocation with suballoc.offset, suballoc.size.
    8697  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8698 
    8699  // 3. Prepare for next iteration.
    8700  lastOffset = suballoc.offset + suballoc.size;
    8701  --nextAlloc2ndIndex;
    8702  }
    8703  // We are at the end.
    8704  else
    8705  {
    8706  if(lastOffset < size)
    8707  {
    8708  // There is free space from lastOffset to size.
    8709  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8710  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8711  }
    8712 
    8713  // End of loop.
    8714  lastOffset = size;
    8715  }
    8716  }
    8717  }
    8718 
    8719  PrintDetailedMap_End(json);
    8720 }
    8721 #endif // #if VMA_STATS_STRING_ENABLED
    8722 
    8723 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    8724  uint32_t currentFrameIndex,
    8725  uint32_t frameInUseCount,
    8726  VkDeviceSize bufferImageGranularity,
    8727  VkDeviceSize allocSize,
    8728  VkDeviceSize allocAlignment,
    8729  bool upperAddress,
    8730  VmaSuballocationType allocType,
    8731  bool canMakeOtherLost,
    8732  uint32_t strategy,
    8733  VmaAllocationRequest* pAllocationRequest)
    8734 {
    8735  VMA_ASSERT(allocSize > 0);
    8736  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8737  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    8738  VMA_HEAVY_ASSERT(Validate());
    8739 
    8740  const VkDeviceSize size = GetSize();
    8741  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8742  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8743 
    8744  if(upperAddress)
    8745  {
    8746  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8747  {
    8748  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    8749  return false;
    8750  }
    8751 
    8752  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    8753  if(allocSize > size)
    8754  {
    8755  return false;
    8756  }
    8757  VkDeviceSize resultBaseOffset = size - allocSize;
    8758  if(!suballocations2nd.empty())
    8759  {
    8760  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8761  resultBaseOffset = lastSuballoc.offset - allocSize;
    8762  if(allocSize > lastSuballoc.offset)
    8763  {
    8764  return false;
    8765  }
    8766  }
    8767 
    8768  // Start from offset equal to end of free space.
    8769  VkDeviceSize resultOffset = resultBaseOffset;
    8770 
    8771  // Apply VMA_DEBUG_MARGIN at the end.
    8772  if(VMA_DEBUG_MARGIN > 0)
    8773  {
    8774  if(resultOffset < VMA_DEBUG_MARGIN)
    8775  {
    8776  return false;
    8777  }
    8778  resultOffset -= VMA_DEBUG_MARGIN;
    8779  }
    8780 
    8781  // Apply alignment.
    8782  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    8783 
    8784  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    8785  // Make bigger alignment if necessary.
    8786  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8787  {
    8788  bool bufferImageGranularityConflict = false;
    8789  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8790  {
    8791  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8792  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8793  {
    8794  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    8795  {
    8796  bufferImageGranularityConflict = true;
    8797  break;
    8798  }
    8799  }
    8800  else
    8801  // Already on previous page.
    8802  break;
    8803  }
    8804  if(bufferImageGranularityConflict)
    8805  {
    8806  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    8807  }
    8808  }
    8809 
    8810  // There is enough free space.
    8811  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    8812  suballocations1st.back().offset + suballocations1st.back().size :
    8813  0;
    8814  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    8815  {
    8816  // Check previous suballocations for BufferImageGranularity conflicts.
    8817  // If conflict exists, allocation cannot be made here.
    8818  if(bufferImageGranularity > 1)
    8819  {
    8820  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8821  {
    8822  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8823  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8824  {
    8825  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    8826  {
    8827  return false;
    8828  }
    8829  }
    8830  else
    8831  {
    8832  // Already on next page.
    8833  break;
    8834  }
    8835  }
    8836  }
    8837 
    8838  // All tests passed: Success.
    8839  pAllocationRequest->offset = resultOffset;
    8840  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    8841  pAllocationRequest->sumItemSize = 0;
    8842  // pAllocationRequest->item unused.
    8843  pAllocationRequest->itemsToMakeLostCount = 0;
    8844  return true;
    8845  }
    8846  }
    8847  else // !upperAddress
    8848  {
    8849  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8850  {
    8851  // Try to allocate at the end of 1st vector.
    8852 
    8853  VkDeviceSize resultBaseOffset = 0;
    8854  if(!suballocations1st.empty())
    8855  {
    8856  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    8857  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8858  }
    8859 
    8860  // Start from offset equal to beginning of free space.
    8861  VkDeviceSize resultOffset = resultBaseOffset;
    8862 
    8863  // Apply VMA_DEBUG_MARGIN at the beginning.
    8864  if(VMA_DEBUG_MARGIN > 0)
    8865  {
    8866  resultOffset += VMA_DEBUG_MARGIN;
    8867  }
    8868 
    8869  // Apply alignment.
    8870  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8871 
    8872  // Check previous suballocations for BufferImageGranularity conflicts.
    8873  // Make bigger alignment if necessary.
    8874  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    8875  {
    8876  bool bufferImageGranularityConflict = false;
    8877  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8878  {
    8879  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8880  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8881  {
    8882  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8883  {
    8884  bufferImageGranularityConflict = true;
    8885  break;
    8886  }
    8887  }
    8888  else
    8889  // Already on previous page.
    8890  break;
    8891  }
    8892  if(bufferImageGranularityConflict)
    8893  {
    8894  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8895  }
    8896  }
    8897 
    8898  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    8899  suballocations2nd.back().offset : size;
    8900 
    8901  // There is enough free space at the end after alignment.
    8902  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    8903  {
    8904  // Check next suballocations for BufferImageGranularity conflicts.
    8905  // If conflict exists, allocation cannot be made here.
    8906  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8907  {
    8908  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8909  {
    8910  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8911  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8912  {
    8913  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8914  {
    8915  return false;
    8916  }
    8917  }
    8918  else
    8919  {
    8920  // Already on previous page.
    8921  break;
    8922  }
    8923  }
    8924  }
    8925 
    8926  // All tests passed: Success.
    8927  pAllocationRequest->offset = resultOffset;
    8928  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    8929  pAllocationRequest->sumItemSize = 0;
    8930  // pAllocationRequest->item unused.
    8931  pAllocationRequest->itemsToMakeLostCount = 0;
    8932  return true;
    8933  }
    8934  }
    8935 
    8936  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    8937  // beginning of 1st vector as the end of free space.
    8938  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8939  {
    8940  VMA_ASSERT(!suballocations1st.empty());
    8941 
    8942  VkDeviceSize resultBaseOffset = 0;
    8943  if(!suballocations2nd.empty())
    8944  {
    8945  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8946  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8947  }
    8948 
    8949  // Start from offset equal to beginning of free space.
    8950  VkDeviceSize resultOffset = resultBaseOffset;
    8951 
    8952  // Apply VMA_DEBUG_MARGIN at the beginning.
    8953  if(VMA_DEBUG_MARGIN > 0)
    8954  {
    8955  resultOffset += VMA_DEBUG_MARGIN;
    8956  }
    8957 
    8958  // Apply alignment.
    8959  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8960 
    8961  // Check previous suballocations for BufferImageGranularity conflicts.
    8962  // Make bigger alignment if necessary.
    8963  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8964  {
    8965  bool bufferImageGranularityConflict = false;
    8966  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    8967  {
    8968  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    8969  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8970  {
    8971  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8972  {
    8973  bufferImageGranularityConflict = true;
    8974  break;
    8975  }
    8976  }
    8977  else
    8978  // Already on previous page.
    8979  break;
    8980  }
    8981  if(bufferImageGranularityConflict)
    8982  {
    8983  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8984  }
    8985  }
    8986 
    8987  pAllocationRequest->itemsToMakeLostCount = 0;
    8988  pAllocationRequest->sumItemSize = 0;
    8989  size_t index1st = m_1stNullItemsBeginCount;
    8990 
    8991  if(canMakeOtherLost)
    8992  {
    8993  while(index1st < suballocations1st.size() &&
    8994  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    8995  {
    8996  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    8997  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8998  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    8999  {
    9000  // No problem.
    9001  }
    9002  else
    9003  {
    9004  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9005  if(suballoc.hAllocation->CanBecomeLost() &&
    9006  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9007  {
    9008  ++pAllocationRequest->itemsToMakeLostCount;
    9009  pAllocationRequest->sumItemSize += suballoc.size;
    9010  }
    9011  else
    9012  {
    9013  return false;
    9014  }
    9015  }
    9016  ++index1st;
    9017  }
    9018 
    9019  // Check next suballocations for BufferImageGranularity conflicts.
    9020  // If conflict exists, we must mark more allocations lost or fail.
    9021  if(bufferImageGranularity > 1)
    9022  {
    9023  while(index1st < suballocations1st.size())
    9024  {
    9025  const VmaSuballocation& suballoc = suballocations1st[index1st];
    9026  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    9027  {
    9028  if(suballoc.hAllocation != VK_NULL_HANDLE)
    9029  {
    9030  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    9031  if(suballoc.hAllocation->CanBecomeLost() &&
    9032  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9033  {
    9034  ++pAllocationRequest->itemsToMakeLostCount;
    9035  pAllocationRequest->sumItemSize += suballoc.size;
    9036  }
    9037  else
    9038  {
    9039  return false;
    9040  }
    9041  }
    9042  }
    9043  else
    9044  {
    9045  // Already on next page.
    9046  break;
    9047  }
    9048  ++index1st;
    9049  }
    9050  }
    9051  }
    9052 
    9053  // There is enough free space at the end after alignment.
    9054  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
    9055  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    9056  {
    9057  // Check next suballocations for BufferImageGranularity conflicts.
    9058  // If conflict exists, allocation cannot be made here.
    9059  if(bufferImageGranularity > 1)
    9060  {
    9061  for(size_t nextSuballocIndex = index1st;
    9062  nextSuballocIndex < suballocations1st.size();
    9063  nextSuballocIndex++)
    9064  {
    9065  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    9066  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9067  {
    9068  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9069  {
    9070  return false;
    9071  }
    9072  }
    9073  else
    9074  {
    9075  // Already on next page.
    9076  break;
    9077  }
    9078  }
    9079  }
    9080 
    9081  // All tests passed: Success.
    9082  pAllocationRequest->offset = resultOffset;
    9083  pAllocationRequest->sumFreeSize =
    9084  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    9085  - resultBaseOffset
    9086  - pAllocationRequest->sumItemSize;
    9087  // pAllocationRequest->item unused.
    9088  return true;
    9089  }
    9090  }
    9091  }
    9092 
    9093  return false;
    9094 }
    9095 
// Makes lost the number of allocations recorded in pAllocationRequest->itemsToMakeLostCount,
// walking the 1st vector forward from its first non-null item.
// Returns false if any of those allocations cannot actually be made lost.
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Making allocations lost is only done while the 2nd vector is empty or used
    // as ring buffer - never in double-stack mode.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        // Free items are skipped - only used allocations count towards itemsToMakeLostCount.
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a free (null) one and update bookkeeping.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    9140 
    9141 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9142 {
    9143  uint32_t lostAllocationCount = 0;
    9144 
    9145  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9146  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9147  {
    9148  VmaSuballocation& suballoc = suballocations1st[i];
    9149  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9150  suballoc.hAllocation->CanBecomeLost() &&
    9151  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9152  {
    9153  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9154  suballoc.hAllocation = VK_NULL_HANDLE;
    9155  ++m_1stNullItemsMiddleCount;
    9156  m_SumFreeSize += suballoc.size;
    9157  ++lostAllocationCount;
    9158  }
    9159  }
    9160 
    9161  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9162  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9163  {
    9164  VmaSuballocation& suballoc = suballocations2nd[i];
    9165  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9166  suballoc.hAllocation->CanBecomeLost() &&
    9167  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9168  {
    9169  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9170  suballoc.hAllocation = VK_NULL_HANDLE;
    9171  ++m_2ndNullItemsCount;
    9172  ++lostAllocationCount;
    9173  }
    9174  }
    9175 
    9176  if(lostAllocationCount)
    9177  {
    9178  CleanupAfterFree();
    9179  }
    9180 
    9181  return lostAllocationCount;
    9182 }
    9183 
    9184 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9185 {
    9186  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9187  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9188  {
    9189  const VmaSuballocation& suballoc = suballocations1st[i];
    9190  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9191  {
    9192  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9193  {
    9194  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9195  return VK_ERROR_VALIDATION_FAILED_EXT;
    9196  }
    9197  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9198  {
    9199  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9200  return VK_ERROR_VALIDATION_FAILED_EXT;
    9201  }
    9202  }
    9203  }
    9204 
    9205  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9206  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9207  {
    9208  const VmaSuballocation& suballoc = suballocations2nd[i];
    9209  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9210  {
    9211  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9212  {
    9213  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9214  return VK_ERROR_VALIDATION_FAILED_EXT;
    9215  }
    9216  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9217  {
    9218  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9219  return VK_ERROR_VALIDATION_FAILED_EXT;
    9220  }
    9221  }
    9222  }
    9223 
    9224  return VK_SUCCESS;
    9225 }
    9226 
// Commits an allocation previously found by CreateAllocationRequest.
// upperAddress == true appends to the 2nd vector as a double stack (growing down);
// otherwise the allocation goes to the end of the 1st vector or wraps around into
// the 2nd vector, forming a 2-part ring buffer.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Double-stack usage is mutually exclusive with ring-buffer usage.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // The request doesn't match either placement - should never happen.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9296 
// Frees the given allocation. The linear metadata identifies suballocations by
// their offset, so this simply delegates to FreeAtOffset().
void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
{
    FreeAtOffset(allocation->GetOffset());
}
    9301 
// Frees the suballocation that starts at the given offset. Tries cheap O(1) fast
// paths first (first item of 1st vector, last item of the active vector), then
// falls back to binary searches in the middle of each vector.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark the item as a null one; compaction happens in CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // The 2nd vector is sorted ascending in ring-buffer mode and descending in
        // double-stack mode, hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9390 
    9391 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9392 {
    9393  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9394  const size_t suballocCount = AccessSuballocations1st().size();
    9395  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9396 }
    9397 
// Restores internal invariants after a free: trims null items from the edges of
// both vectors, optionally compacts the 1st vector, and swaps the vectors when
// the 1st one becomes empty while a ring buffer is active.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Whole block free - reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact: move all non-null items to the front, dropping null ones.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // After the swap, strip null items from the new 1st vector's front.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Toggling the index is what actually swaps which vector is "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9494 
    9495 
    9497 // class VmaBlockMetadata_Buddy
    9498 
// Constructor only zeroes state; the tree is built later in Init().
// m_FreeCount starts at 1 because the initial state is one free root node.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9508 
// Recursively frees the whole node tree starting at the root.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    9513 
    9514 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9515 {
    9516  VmaBlockMetadata::Init(size);
    9517 
    9518  m_UsableSize = VmaPrevPow2(size);
    9519  m_SumFreeSize = m_UsableSize;
    9520 
    9521  // Calculate m_LevelCount.
    9522  m_LevelCount = 1;
    9523  while(m_LevelCount < MAX_LEVELS &&
    9524  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9525  {
    9526  ++m_LevelCount;
    9527  }
    9528 
    9529  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9530  rootNode->offset = 0;
    9531  rootNode->type = Node::TYPE_FREE;
    9532  rootNode->parent = VMA_NULL;
    9533  rootNode->buddy = VMA_NULL;
    9534 
    9535  m_Root = rootNode;
    9536  AddToFreeListFront(0, rootNode);
    9537 }
    9538 
// Validates the whole buddy structure: the node tree, the per-level free lists
// (including their doubly-linked consistency), and that levels beyond
// m_LevelCount hold no nodes.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9581 
    9582 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9583 {
    9584  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9585  {
    9586  if(m_FreeList[level].front != VMA_NULL)
    9587  {
    9588  return LevelToNodeSize(level);
    9589  }
    9590  }
    9591  return 0;
    9592 }
    9593 
// Fills outInfo with statistics for this single block by traversing the node
// tree, then accounts for the unusable (non-power-of-2) tail as one more
// unused range.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    // Recursive accumulation over the whole tree.
    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9617 
    9618 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9619 {
    9620  const VkDeviceSize unusableSize = GetUnusableSize();
    9621 
    9622  inoutStats.size += GetSize();
    9623  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9624  inoutStats.allocationCount += m_AllocationCount;
    9625  inoutStats.unusedRangeCount += m_FreeCount;
    9626  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9627 
    9628  if(unusableSize > 0)
    9629  {
    9630  ++inoutStats.unusedRangeCount;
    9631  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9632  }
    9633 }
    9634 
    9635 #if VMA_STATS_STRING_ENABLED
    9636 
// Writes a detailed JSON map of this block: header stats, every node of the
// tree, and the unusable tail (if any) as a final unused range.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    // Stats are computed up front only to fill the header of the JSON object.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    9661 
    9662 #endif // #if VMA_STATS_STRING_ENABLED
    9663 
    9664 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    9665  uint32_t currentFrameIndex,
    9666  uint32_t frameInUseCount,
    9667  VkDeviceSize bufferImageGranularity,
    9668  VkDeviceSize allocSize,
    9669  VkDeviceSize allocAlignment,
    9670  bool upperAddress,
    9671  VmaSuballocationType allocType,
    9672  bool canMakeOtherLost,
    9673  uint32_t strategy,
    9674  VmaAllocationRequest* pAllocationRequest)
    9675 {
    9676  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    9677 
    9678  // Simple way to respect bufferImageGranularity. May be optimized some day.
    9679  // Whenever it might be an OPTIMAL image...
    9680  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    9681  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    9682  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    9683  {
    9684  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    9685  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    9686  }
    9687 
    9688  if(allocSize > m_UsableSize)
    9689  {
    9690  return false;
    9691  }
    9692 
    9693  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    9694  for(uint32_t level = targetLevel + 1; level--; )
    9695  {
    9696  for(Node* freeNode = m_FreeList[level].front;
    9697  freeNode != VMA_NULL;
    9698  freeNode = freeNode->free.next)
    9699  {
    9700  if(freeNode->offset % allocAlignment == 0)
    9701  {
    9702  pAllocationRequest->offset = freeNode->offset;
    9703  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    9704  pAllocationRequest->sumItemSize = 0;
    9705  pAllocationRequest->itemsToMakeLostCount = 0;
    9706  pAllocationRequest->customData = (void*)(uintptr_t)level;
    9707  return true;
    9708  }
    9709  }
    9710  }
    9711 
    9712  return false;
    9713 }
    9714 
// Succeeds only when the request required no allocations to be made lost.
bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    return pAllocationRequest->itemsToMakeLostCount == 0;
}
    9726 
// Always reports zero allocations made lost.
uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    return 0;
}
    9735 
// Commits an allocation found by CreateAllocationRequest: locates the chosen
// free node (request.customData carries its level), splits it down to the
// target level if it is larger, and converts the final node to an allocation.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the free node with the requested offset in that level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Left is pushed last so it ends up at the front of the list.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fulfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9810 
    9811 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9812 {
    9813  if(node->type == Node::TYPE_SPLIT)
    9814  {
    9815  DeleteNode(node->split.leftChild->buddy);
    9816  DeleteNode(node->split.leftChild);
    9817  }
    9818 
    9819  vma_delete(GetAllocationCallbacks(), node);
    9820 }
    9821 
    9822 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9823 {
    9824  VMA_VALIDATE(level < m_LevelCount);
    9825  VMA_VALIDATE(curr->parent == parent);
    9826  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9827  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9828  switch(curr->type)
    9829  {
    9830  case Node::TYPE_FREE:
    9831  // curr->free.prev, next are validated separately.
    9832  ctx.calculatedSumFreeSize += levelNodeSize;
    9833  ++ctx.calculatedFreeCount;
    9834  break;
    9835  case Node::TYPE_ALLOCATION:
    9836  ++ctx.calculatedAllocationCount;
    9837  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9838  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9839  break;
    9840  case Node::TYPE_SPLIT:
    9841  {
    9842  const uint32_t childrenLevel = level + 1;
    9843  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9844  const Node* const leftChild = curr->split.leftChild;
    9845  VMA_VALIDATE(leftChild != VMA_NULL);
    9846  VMA_VALIDATE(leftChild->offset == curr->offset);
    9847  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9848  {
    9849  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9850  }
    9851  const Node* const rightChild = leftChild->buddy;
    9852  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9853  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9854  {
    9855  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9856  }
    9857  }
    9858  break;
    9859  default:
    9860  return false;
    9861  }
    9862 
    9863  return true;
    9864 }
    9865 
    9866 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9867 {
    9868  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9869  uint32_t level = 0;
    9870  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9871  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9872  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9873  {
    9874  ++level;
    9875  currLevelNodeSize = nextLevelNodeSize;
    9876  nextLevelNodeSize = currLevelNodeSize >> 1;
    9877  }
    9878  return level;
    9879 }
    9880 
// Frees the allocation node at the given offset: walks the tree from the root
// following the offset, converts the node back to free, and merges it with its
// buddy repeatedly while both halves of a parent are free.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above permits alloc == VK_NULL_HANDLE, yet alloc
    // is dereferenced unconditionally here - confirm callers never pass a null
    // allocation, or read the size from node->allocation.alloc instead.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9931 
    9932 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9933 {
    9934  switch(node->type)
    9935  {
    9936  case Node::TYPE_FREE:
    9937  ++outInfo.unusedRangeCount;
    9938  outInfo.unusedBytes += levelNodeSize;
    9939  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9940  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9941  break;
    9942  case Node::TYPE_ALLOCATION:
    9943  {
    9944  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9945  ++outInfo.allocationCount;
    9946  outInfo.usedBytes += allocSize;
    9947  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9948  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9949 
    9950  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9951  if(unusedRangeSize > 0)
    9952  {
    9953  ++outInfo.unusedRangeCount;
    9954  outInfo.unusedBytes += unusedRangeSize;
    9955  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9956  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9957  }
    9958  }
    9959  break;
    9960  case Node::TYPE_SPLIT:
    9961  {
    9962  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9963  const Node* const leftChild = node->split.leftChild;
    9964  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9965  const Node* const rightChild = leftChild->buddy;
    9966  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9967  }
    9968  break;
    9969  default:
    9970  VMA_ASSERT(0);
    9971  }
    9972 }
    9973 
    9974 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9975 {
    9976  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9977 
    9978  // List is empty.
    9979  Node* const frontNode = m_FreeList[level].front;
    9980  if(frontNode == VMA_NULL)
    9981  {
    9982  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9983  node->free.prev = node->free.next = VMA_NULL;
    9984  m_FreeList[level].front = m_FreeList[level].back = node;
    9985  }
    9986  else
    9987  {
    9988  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9989  node->free.prev = VMA_NULL;
    9990  node->free.next = frontNode;
    9991  frontNode->free.prev = node;
    9992  m_FreeList[level].front = node;
    9993  }
    9994 }
    9995 
    9996 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9997 {
    9998  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9999 
    10000  // It is at the front.
    10001  if(node->free.prev == VMA_NULL)
    10002  {
    10003  VMA_ASSERT(m_FreeList[level].front == node);
    10004  m_FreeList[level].front = node->free.next;
    10005  }
    10006  else
    10007  {
    10008  Node* const prevFreeNode = node->free.prev;
    10009  VMA_ASSERT(prevFreeNode->free.next == node);
    10010  prevFreeNode->free.next = node->free.next;
    10011  }
    10012 
    10013  // It is at the back.
    10014  if(node->free.next == VMA_NULL)
    10015  {
    10016  VMA_ASSERT(m_FreeList[level].back == node);
    10017  m_FreeList[level].back = node->free.prev;
    10018  }
    10019  else
    10020  {
    10021  Node* const nextFreeNode = node->free.next;
    10022  VMA_ASSERT(nextFreeNode->free.prev == node);
    10023  nextFreeNode->free.prev = node->free.prev;
    10024  }
    10025 }
    10026 
    10027 #if VMA_STATS_STRING_ENABLED
    10028 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    10029 {
    10030  switch(node->type)
    10031  {
    10032  case Node::TYPE_FREE:
    10033  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    10034  break;
    10035  case Node::TYPE_ALLOCATION:
    10036  {
    10037  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    10038  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10039  if(allocSize < levelNodeSize)
    10040  {
    10041  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    10042  }
    10043  }
    10044  break;
    10045  case Node::TYPE_SPLIT:
    10046  {
    10047  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10048  const Node* const leftChild = node->split.leftChild;
    10049  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    10050  const Node* const rightChild = leftChild->buddy;
    10051  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    10052  }
    10053  break;
    10054  default:
    10055  VMA_ASSERT(0);
    10056  }
    10057 }
    10058 #endif // #if VMA_STATS_STRING_ENABLED
    10059 
    10060 
    10062 // class VmaDeviceMemoryBlock
    10063 
// Constructs an empty, uninitialized block; the real setup (memory handle,
// metadata object, type index) happens later in Init().
// The hAllocator parameter is unused in this constructor as visible here.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    10073 
    10074 void VmaDeviceMemoryBlock::Init(
    10075  VmaAllocator hAllocator,
    10076  uint32_t newMemoryTypeIndex,
    10077  VkDeviceMemory newMemory,
    10078  VkDeviceSize newSize,
    10079  uint32_t id,
    10080  uint32_t algorithm)
    10081 {
    10082  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10083 
    10084  m_MemoryTypeIndex = newMemoryTypeIndex;
    10085  m_Id = id;
    10086  m_hMemory = newMemory;
    10087 
    10088  switch(algorithm)
    10089  {
    10091  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10092  break;
    10094  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10095  break;
    10096  default:
    10097  VMA_ASSERT(0);
    10098  // Fall-through.
    10099  case 0:
    10100  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10101  }
    10102  m_pMetadata->Init(newSize);
    10103 }
    10104 
    10105 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    10106 {
    10107  // This is the most important assert in the entire library.
    10108  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    10109  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    10110 
    10111  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    10112  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    10113  m_hMemory = VK_NULL_HANDLE;
    10114 
    10115  vma_delete(allocator, m_pMetadata);
    10116  m_pMetadata = VMA_NULL;
    10117 }
    10118 
    10119 bool VmaDeviceMemoryBlock::Validate() const
    10120 {
    10121  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    10122  (m_pMetadata->GetSize() != 0));
    10123 
    10124  return m_pMetadata->Validate();
    10125 }
    10126 
    10127 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10128 {
    10129  void* pData = nullptr;
    10130  VkResult res = Map(hAllocator, 1, &pData);
    10131  if(res != VK_SUCCESS)
    10132  {
    10133  return res;
    10134  }
    10135 
    10136  res = m_pMetadata->CheckCorruption(pData);
    10137 
    10138  Unmap(hAllocator, 1);
    10139 
    10140  return res;
    10141 }
    10142 
    10143 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    10144 {
    10145  if(count == 0)
    10146  {
    10147  return VK_SUCCESS;
    10148  }
    10149 
    10150  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10151  if(m_MapCount != 0)
    10152  {
    10153  m_MapCount += count;
    10154  VMA_ASSERT(m_pMappedData != VMA_NULL);
    10155  if(ppData != VMA_NULL)
    10156  {
    10157  *ppData = m_pMappedData;
    10158  }
    10159  return VK_SUCCESS;
    10160  }
    10161  else
    10162  {
    10163  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    10164  hAllocator->m_hDevice,
    10165  m_hMemory,
    10166  0, // offset
    10167  VK_WHOLE_SIZE,
    10168  0, // flags
    10169  &m_pMappedData);
    10170  if(result == VK_SUCCESS)
    10171  {
    10172  if(ppData != VMA_NULL)
    10173  {
    10174  *ppData = m_pMappedData;
    10175  }
    10176  m_MapCount = count;
    10177  }
    10178  return result;
    10179  }
    10180 }
    10181 
    10182 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    10183 {
    10184  if(count == 0)
    10185  {
    10186  return;
    10187  }
    10188 
    10189  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10190  if(m_MapCount >= count)
    10191  {
    10192  m_MapCount -= count;
    10193  if(m_MapCount == 0)
    10194  {
    10195  m_pMappedData = VMA_NULL;
    10196  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10197  }
    10198  }
    10199  else
    10200  {
    10201  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10202  }
    10203 }
    10204 
    10205 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10206 {
    10207  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10208  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10209 
    10210  void* pData;
    10211  VkResult res = Map(hAllocator, 1, &pData);
    10212  if(res != VK_SUCCESS)
    10213  {
    10214  return res;
    10215  }
    10216 
    10217  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10218  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10219 
    10220  Unmap(hAllocator, 1);
    10221 
    10222  return VK_SUCCESS;
    10223 }
    10224 
    10225 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10226 {
    10227  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10228  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10229 
    10230  void* pData;
    10231  VkResult res = Map(hAllocator, 1, &pData);
    10232  if(res != VK_SUCCESS)
    10233  {
    10234  return res;
    10235  }
    10236 
    10237  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10238  {
    10239  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10240  }
    10241  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10242  {
    10243  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10244  }
    10245 
    10246  Unmap(hAllocator, 1);
    10247 
    10248  return VK_SUCCESS;
    10249 }
    10250 
    10251 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10252  const VmaAllocator hAllocator,
    10253  const VmaAllocation hAllocation,
    10254  VkBuffer hBuffer)
    10255 {
    10256  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10257  hAllocation->GetBlock() == this);
    10258  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10259  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10260  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10261  hAllocator->m_hDevice,
    10262  hBuffer,
    10263  m_hMemory,
    10264  hAllocation->GetOffset());
    10265 }
    10266 
    10267 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10268  const VmaAllocator hAllocator,
    10269  const VmaAllocation hAllocation,
    10270  VkImage hImage)
    10271 {
    10272  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10273  hAllocation->GetBlock() == this);
    10274  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10275  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10276  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10277  hAllocator->m_hDevice,
    10278  hImage,
    10279  m_hMemory,
    10280  hAllocation->GetOffset());
    10281 }
    10282 
    10283 static void InitStatInfo(VmaStatInfo& outInfo)
    10284 {
    10285  memset(&outInfo, 0, sizeof(outInfo));
    10286  outInfo.allocationSizeMin = UINT64_MAX;
    10287  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10288 }
    10289 
    10290 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10291 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10292 {
    10293  inoutInfo.blockCount += srcInfo.blockCount;
    10294  inoutInfo.allocationCount += srcInfo.allocationCount;
    10295  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10296  inoutInfo.usedBytes += srcInfo.usedBytes;
    10297  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10298  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10299  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10300  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10301  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10302 }
    10303 
    10304 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10305 {
    10306  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10307  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10308  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10309  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10310 }
    10311 
// Custom pool: a thin wrapper around a single VmaBlockVector configured from
// createInfo. createInfo.blockSize == 0 means "use the heap-dependent
// preferredBlockSize" and also marks the block size as non-explicit.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10330 
// Intentionally empty: m_BlockVector tears itself down in its own destructor.
VmaPool_T::~VmaPool_T()
{
}
    10334 
    10335 #if VMA_STATS_STRING_ENABLED
    10336 
    10337 #endif // #if VMA_STATS_STRING_ENABLED
    10338 
// Stores the configuration for a sequence of VkDeviceMemory blocks of one
// memory type; blocks themselves are created lazily (see CreateMinBlocks /
// Allocate). Member-initializer order follows the member declaration order.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10366 
    10367 VmaBlockVector::~VmaBlockVector()
    10368 {
    10369  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10370 
    10371  for(size_t i = m_Blocks.size(); i--; )
    10372  {
    10373  m_Blocks[i]->Destroy(m_hAllocator);
    10374  vma_delete(m_hAllocator, m_Blocks[i]);
    10375  }
    10376 }
    10377 
    10378 VkResult VmaBlockVector::CreateMinBlocks()
    10379 {
    10380  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10381  {
    10382  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10383  if(res != VK_SUCCESS)
    10384  {
    10385  return res;
    10386  }
    10387  }
    10388  return VK_SUCCESS;
    10389 }
    10390 
    10391 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10392 {
    10393  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10394 
    10395  const size_t blockCount = m_Blocks.size();
    10396 
    10397  pStats->size = 0;
    10398  pStats->unusedSize = 0;
    10399  pStats->allocationCount = 0;
    10400  pStats->unusedRangeCount = 0;
    10401  pStats->unusedRangeSizeMax = 0;
    10402  pStats->blockCount = blockCount;
    10403 
    10404  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10405  {
    10406  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10407  VMA_ASSERT(pBlock);
    10408  VMA_HEAVY_ASSERT(pBlock->Validate());
    10409  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10410  }
    10411 }
    10412 
    10413 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10414 {
    10415  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10416  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10417  (VMA_DEBUG_MARGIN > 0) &&
    10418  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10419 }
    10420 
    10421 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10422 
// Allocates `size` bytes (aligned to `alignment`) from this block vector:
// 1. tries existing blocks, 2. tries creating a new block, 3. optionally tries
// making other (lost-able) allocations lost. On success fills *pAllocation.
// NOTE(review): several lines of this listing appear to have been dropped by
// the extraction (gaps in the residual numbering); the affected spots are
// marked below and must be reconciled with the original file.
VkResult VmaBlockVector::Allocate(
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    // Decode the relevant creation flags up front.
    const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    const bool canCreateNewBlock =
        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
        (m_Blocks.size() < m_MaxBlockCount);
    uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;

    // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    // Which in turn is available only when maxBlockCount = 1.
    if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    {
        canMakeOtherLost = false;
    }

    // Upper address can only be used with linear allocator and within single memory block.
    if(isUpperAddress &&
        (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Validate strategy.
    // NOTE(review): the case labels for the explicit
    // VMA_ALLOCATION_CREATE_STRATEGY_* values appear to be missing from this
    // listing — confirm this switch against the original file.
    switch(strategy)
    {
    case 0:
        break;
        break;
    default:
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Early reject: requested allocation size is larger that maximum block size for this block vector.
    if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    /*
    Under certain condition, this whole section can be skipped for optimization, so
    we move on directly to trying to allocate with canMakeOtherLost. That's the case
    e.g. for custom pools with linear algorithm.
    */
    if(!canMakeOtherLost || canCreateNewBlock)
    {
        // 1. Search existing allocations. Try to allocate without making other allocations lost.
        VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
        // NOTE(review): a line appears to be missing here in this listing.

        if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
        {
            // Use only last block.
            if(!m_Blocks.empty())
            {
                VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
                VMA_ASSERT(pCurrBlock);
                VkResult res = AllocateFromBlock(
                    pCurrBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
                    return VK_SUCCESS;
                }
            }
        }
        else
        {
            // NOTE(review): an `if(strategy == ...)` condition line appears to
            // be missing before this brace (it pairs with the
            // `else // WORST_FIT, FIRST_FIT` below).
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
        }

        // 2. Try to create new block.
        if(canCreateNewBlock)
        {
            // Calculate optimal size for new block.
            VkDeviceSize newBlockSize = m_PreferredBlockSize;
            uint32_t newBlockSizeShift = 0;
            const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;

            if(!m_ExplicitBlockSize)
            {
                // Allocate 1/8, 1/4, 1/2 as first blocks.
                const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
                for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                    }
                    else
                    {
                        break;
                    }
                }
            }

            size_t newBlockIndex = 0;
            VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
            // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
            if(!m_ExplicitBlockSize)
            {
                while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize >= size)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                        res = CreateBlock(newBlockSize, &newBlockIndex);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            if(res == VK_SUCCESS)
            {
                VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
                VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

                res = AllocateFromBlock(
                    pBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
                    return VK_SUCCESS;
                }
                else
                {
                    // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
            }
        }
    }

    // 3. Try to allocate from existing blocks with making other allocations lost.
    if(canMakeOtherLost)
    {
        uint32_t tryIndex = 0;
        for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
        {
            VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
            VmaAllocationRequest bestRequest = {};
            VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;

            // 1. Search existing allocations.
            // NOTE(review): an `if(strategy == ...)` condition line appears to
            // be missing before this brace (pairs with the `else` below).
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        // Keep the candidate with the lowest cost (fewest losses).
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost)
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            if(bestRequestCost == 0)
                            {
                                break;
                            }
                        }
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        // NOTE(review): the tail of this condition appears to
                        // be missing from this listing.
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost ||
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            // NOTE(review): the tail of this condition appears
                            // to be missing from this listing.
                            if(bestRequestCost == 0 ||
                            {
                                break;
                            }
                        }
                    }
                }
            }

            if(pBestRequestBlock != VMA_NULL)
            {
                if(mapped)
                {
                    VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
                    if(res != VK_SUCCESS)
                    {
                        return res;
                    }
                }

                if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
                    currentFrameIndex,
                    m_FrameInUseCount,
                    &bestRequest))
                {
                    // We no longer have an empty Allocation.
                    if(pBestRequestBlock->m_pMetadata->IsEmpty())
                    {
                        m_HasEmptyBlock = false;
                    }
                    // Allocate from this pBlock.
                    *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
                    pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
                    (*pAllocation)->InitBlockAllocation(
                        hCurrentPool,
                        pBestRequestBlock,
                        bestRequest.offset,
                        alignment,
                        size,
                        suballocType,
                        mapped,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
                    VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
                    VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
                    (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
                    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                    {
                        m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
                    }
                    if(IsCorruptionDetectionEnabled())
                    {
                        VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
                        VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
                    }
                    return VK_SUCCESS;
                }
                // else: Some allocations must have been touched while we are here. Next try.
            }
            else
            {
                // Could not find place in any of the blocks - break outer loop.
                break;
            }
        }
        /* Maximum number of tries exceeded - a very unlike event when many other
        threads are simultaneously touching allocations making it impossible to make
        lost at the same time as we try to allocate. */
        if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
        {
            return VK_ERROR_TOO_MANY_OBJECTS;
        }
    }

    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10797 
    10798 void VmaBlockVector::Free(
    10799  VmaAllocation hAllocation)
    10800 {
    10801  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10802 
    10803  // Scope for lock.
    10804  {
    10805  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10806 
    10807  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10808 
    10809  if(IsCorruptionDetectionEnabled())
    10810  {
    10811  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10812  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10813  }
    10814 
    10815  if(hAllocation->IsPersistentMap())
    10816  {
    10817  pBlock->Unmap(m_hAllocator, 1);
    10818  }
    10819 
    10820  pBlock->m_pMetadata->Free(hAllocation);
    10821  VMA_HEAVY_ASSERT(pBlock->Validate());
    10822 
    10823  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10824 
    10825  // pBlock became empty after this deallocation.
    10826  if(pBlock->m_pMetadata->IsEmpty())
    10827  {
    10828  // Already has empty Allocation. We don't want to have two, so delete this one.
    10829  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10830  {
    10831  pBlockToDelete = pBlock;
    10832  Remove(pBlock);
    10833  }
    10834  // We now have first empty block.
    10835  else
    10836  {
    10837  m_HasEmptyBlock = true;
    10838  }
    10839  }
    10840  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10841  // (This is optional, heuristics.)
    10842  else if(m_HasEmptyBlock)
    10843  {
    10844  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10845  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10846  {
    10847  pBlockToDelete = pLastBlock;
    10848  m_Blocks.pop_back();
    10849  m_HasEmptyBlock = false;
    10850  }
    10851  }
    10852 
    10853  IncrementallySortBlocks();
    10854  }
    10855 
    10856  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10857  // lock, for performance reason.
    10858  if(pBlockToDelete != VMA_NULL)
    10859  {
    10860  VMA_DEBUG_LOG(" Deleted empty allocation");
    10861  pBlockToDelete->Destroy(m_hAllocator);
    10862  vma_delete(m_hAllocator, pBlockToDelete);
    10863  }
    10864 }
    10865 
    10866 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10867 {
    10868  VkDeviceSize result = 0;
    10869  for(size_t i = m_Blocks.size(); i--; )
    10870  {
    10871  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10872  if(result >= m_PreferredBlockSize)
    10873  {
    10874  break;
    10875  }
    10876  }
    10877  return result;
    10878 }
    10879 
    10880 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10881 {
    10882  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10883  {
    10884  if(m_Blocks[blockIndex] == pBlock)
    10885  {
    10886  VmaVectorRemove(m_Blocks, blockIndex);
    10887  return;
    10888  }
    10889  }
    10890  VMA_ASSERT(0);
    10891 }
    10892 
    10893 void VmaBlockVector::IncrementallySortBlocks()
    10894 {
    10895  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10896  {
    10897  // Bubble sort only until first swap.
    10898  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10899  {
    10900  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10901  {
    10902  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10903  return;
    10904  }
    10905  }
    10906  }
    10907 }
    10908 
    10909 VkResult VmaBlockVector::AllocateFromBlock(
    10910  VmaDeviceMemoryBlock* pBlock,
    10911  VmaPool hCurrentPool,
    10912  uint32_t currentFrameIndex,
    10913  VkDeviceSize size,
    10914  VkDeviceSize alignment,
    10915  VmaAllocationCreateFlags allocFlags,
    10916  void* pUserData,
    10917  VmaSuballocationType suballocType,
    10918  uint32_t strategy,
    10919  VmaAllocation* pAllocation)
    10920 {
    10921  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    10922  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10923  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10924  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10925 
    10926  VmaAllocationRequest currRequest = {};
    10927  if(pBlock->m_pMetadata->CreateAllocationRequest(
    10928  currentFrameIndex,
    10929  m_FrameInUseCount,
    10930  m_BufferImageGranularity,
    10931  size,
    10932  alignment,
    10933  isUpperAddress,
    10934  suballocType,
    10935  false, // canMakeOtherLost
    10936  strategy,
    10937  &currRequest))
    10938  {
    10939  // Allocate from pCurrBlock.
    10940  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    10941 
    10942  if(mapped)
    10943  {
    10944  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    10945  if(res != VK_SUCCESS)
    10946  {
    10947  return res;
    10948  }
    10949  }
    10950 
    10951  // We no longer have an empty Allocation.
    10952  if(pBlock->m_pMetadata->IsEmpty())
    10953  {
    10954  m_HasEmptyBlock = false;
    10955  }
    10956 
    10957  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10958  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
    10959  (*pAllocation)->InitBlockAllocation(
    10960  hCurrentPool,
    10961  pBlock,
    10962  currRequest.offset,
    10963  alignment,
    10964  size,
    10965  suballocType,
    10966  mapped,
    10967  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10968  VMA_HEAVY_ASSERT(pBlock->Validate());
    10969  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    10970  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10971  {
    10972  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10973  }
    10974  if(IsCorruptionDetectionEnabled())
    10975  {
    10976  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    10977  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10978  }
    10979  return VK_SUCCESS;
    10980  }
    10981  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10982 }
    10983 
    10984 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10985 {
    10986  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10987  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10988  allocInfo.allocationSize = blockSize;
    10989  VkDeviceMemory mem = VK_NULL_HANDLE;
    10990  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10991  if(res < 0)
    10992  {
    10993  return res;
    10994  }
    10995 
    10996  // New VkDeviceMemory successfully created.
    10997 
    10998  // Create new Allocation for it.
    10999  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    11000  pBlock->Init(
    11001  m_hAllocator,
    11002  m_MemoryTypeIndex,
    11003  mem,
    11004  allocInfo.allocationSize,
    11005  m_NextBlockId++,
    11006  m_Algorithm);
    11007 
    11008  m_Blocks.push_back(pBlock);
    11009  if(pNewBlockIndex != VMA_NULL)
    11010  {
    11011  *pNewBlockIndex = m_Blocks.size() - 1;
    11012  }
    11013 
    11014  return VK_SUCCESS;
    11015 }
    11016 
    11017 #if VMA_STATS_STRING_ENABLED
    11018 
    11019 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    11020 {
    11021  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11022 
    11023  json.BeginObject();
    11024 
    11025  if(m_IsCustomPool)
    11026  {
    11027  json.WriteString("MemoryTypeIndex");
    11028  json.WriteNumber(m_MemoryTypeIndex);
    11029 
    11030  json.WriteString("BlockSize");
    11031  json.WriteNumber(m_PreferredBlockSize);
    11032 
    11033  json.WriteString("BlockCount");
    11034  json.BeginObject(true);
    11035  if(m_MinBlockCount > 0)
    11036  {
    11037  json.WriteString("Min");
    11038  json.WriteNumber((uint64_t)m_MinBlockCount);
    11039  }
    11040  if(m_MaxBlockCount < SIZE_MAX)
    11041  {
    11042  json.WriteString("Max");
    11043  json.WriteNumber((uint64_t)m_MaxBlockCount);
    11044  }
    11045  json.WriteString("Cur");
    11046  json.WriteNumber((uint64_t)m_Blocks.size());
    11047  json.EndObject();
    11048 
    11049  if(m_FrameInUseCount > 0)
    11050  {
    11051  json.WriteString("FrameInUseCount");
    11052  json.WriteNumber(m_FrameInUseCount);
    11053  }
    11054 
    11055  if(m_Algorithm != 0)
    11056  {
    11057  json.WriteString("Algorithm");
    11058  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    11059  }
    11060  }
    11061  else
    11062  {
    11063  json.WriteString("PreferredBlockSize");
    11064  json.WriteNumber(m_PreferredBlockSize);
    11065  }
    11066 
    11067  json.WriteString("Blocks");
    11068  json.BeginObject();
    11069  for(size_t i = 0; i < m_Blocks.size(); ++i)
    11070  {
    11071  json.BeginString();
    11072  json.ContinueString(m_Blocks[i]->GetId());
    11073  json.EndString();
    11074 
    11075  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    11076  }
    11077  json.EndObject();
    11078 
    11079  json.EndObject();
    11080 }
    11081 
    11082 #endif // #if VMA_STATS_STRING_ENABLED
    11083 
    11084 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    11085  VmaAllocator hAllocator,
    11086  uint32_t currentFrameIndex)
    11087 {
    11088  if(m_pDefragmentator == VMA_NULL)
    11089  {
    11090  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11091  hAllocator,
    11092  this,
    11093  currentFrameIndex);
    11094  }
    11095 
    11096  return m_pDefragmentator;
    11097 }
    11098 
    11099 VkResult VmaBlockVector::Defragment(
    11100  VmaDefragmentationStats* pDefragmentationStats,
    11101  VkDeviceSize& maxBytesToMove,
    11102  uint32_t& maxAllocationsToMove)
    11103 {
    11104  if(m_pDefragmentator == VMA_NULL)
    11105  {
    11106  return VK_SUCCESS;
    11107  }
    11108 
    11109  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11110 
    11111  // Defragment.
    11112  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    11113 
    11114  // Accumulate statistics.
    11115  if(pDefragmentationStats != VMA_NULL)
    11116  {
    11117  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    11118  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    11119  pDefragmentationStats->bytesMoved += bytesMoved;
    11120  pDefragmentationStats->allocationsMoved += allocationsMoved;
    11121  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    11122  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    11123  maxBytesToMove -= bytesMoved;
    11124  maxAllocationsToMove -= allocationsMoved;
    11125  }
    11126 
    11127  // Free empty blocks.
    11128  m_HasEmptyBlock = false;
    11129  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11130  {
    11131  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    11132  if(pBlock->m_pMetadata->IsEmpty())
    11133  {
    11134  if(m_Blocks.size() > m_MinBlockCount)
    11135  {
    11136  if(pDefragmentationStats != VMA_NULL)
    11137  {
    11138  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    11139  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    11140  }
    11141 
    11142  VmaVectorRemove(m_Blocks, blockIndex);
    11143  pBlock->Destroy(m_hAllocator);
    11144  vma_delete(m_hAllocator, pBlock);
    11145  }
    11146  else
    11147  {
    11148  m_HasEmptyBlock = true;
    11149  }
    11150  }
    11151  }
    11152 
    11153  return result;
    11154 }
    11155 
    11156 void VmaBlockVector::DestroyDefragmentator()
    11157 {
    11158  if(m_pDefragmentator != VMA_NULL)
    11159  {
    11160  vma_delete(m_hAllocator, m_pDefragmentator);
    11161  m_pDefragmentator = VMA_NULL;
    11162  }
    11163 }
    11164 
    11165 void VmaBlockVector::MakePoolAllocationsLost(
    11166  uint32_t currentFrameIndex,
    11167  size_t* pLostAllocationCount)
    11168 {
    11169  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11170  size_t lostAllocationCount = 0;
    11171  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11172  {
    11173  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11174  VMA_ASSERT(pBlock);
    11175  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11176  }
    11177  if(pLostAllocationCount != VMA_NULL)
    11178  {
    11179  *pLostAllocationCount = lostAllocationCount;
    11180  }
    11181 }
    11182 
    11183 VkResult VmaBlockVector::CheckCorruption()
    11184 {
    11185  if(!IsCorruptionDetectionEnabled())
    11186  {
    11187  return VK_ERROR_FEATURE_NOT_PRESENT;
    11188  }
    11189 
    11190  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11191  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11192  {
    11193  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11194  VMA_ASSERT(pBlock);
    11195  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11196  if(res != VK_SUCCESS)
    11197  {
    11198  return res;
    11199  }
    11200  }
    11201  return VK_SUCCESS;
    11202 }
    11203 
    11204 void VmaBlockVector::AddStats(VmaStats* pStats)
    11205 {
    11206  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11207  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11208 
    11209  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11210 
    11211  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11212  {
    11213  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11214  VMA_ASSERT(pBlock);
    11215  VMA_HEAVY_ASSERT(pBlock->Validate());
    11216  VmaStatInfo allocationStatInfo;
    11217  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11218  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11219  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11220  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11221  }
    11222 }
    11223 
    11225 // VmaDefragmentator members definition
    11226 
    11227 VmaDefragmentator::VmaDefragmentator(
    11228  VmaAllocator hAllocator,
    11229  VmaBlockVector* pBlockVector,
    11230  uint32_t currentFrameIndex) :
    11231  m_hAllocator(hAllocator),
    11232  m_pBlockVector(pBlockVector),
    11233  m_CurrentFrameIndex(currentFrameIndex),
    11234  m_BytesMoved(0),
    11235  m_AllocationsMoved(0),
    11236  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    11237  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    11238 {
    11239  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
    11240 }
    11241 
    11242 VmaDefragmentator::~VmaDefragmentator()
    11243 {
    11244  for(size_t i = m_Blocks.size(); i--; )
    11245  {
    11246  vma_delete(m_hAllocator, m_Blocks[i]);
    11247  }
    11248 }
    11249 
    11250 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11251 {
    11252  AllocationInfo allocInfo;
    11253  allocInfo.m_hAllocation = hAlloc;
    11254  allocInfo.m_pChanged = pChanged;
    11255  m_Allocations.push_back(allocInfo);
    11256 }
    11257 
    11258 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11259 {
    11260  // It has already been mapped for defragmentation.
    11261  if(m_pMappedDataForDefragmentation)
    11262  {
    11263  *ppMappedData = m_pMappedDataForDefragmentation;
    11264  return VK_SUCCESS;
    11265  }
    11266 
    11267  // It is originally mapped.
    11268  if(m_pBlock->GetMappedData())
    11269  {
    11270  *ppMappedData = m_pBlock->GetMappedData();
    11271  return VK_SUCCESS;
    11272  }
    11273 
    11274  // Map on first usage.
    11275  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11276  *ppMappedData = m_pMappedDataForDefragmentation;
    11277  return res;
    11278 }
    11279 
    11280 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11281 {
    11282  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11283  {
    11284  m_pBlock->Unmap(hAllocator, 1);
    11285  }
    11286 }
    11287 
    11288 VkResult VmaDefragmentator::DefragmentRound(
    11289  VkDeviceSize maxBytesToMove,
    11290  uint32_t maxAllocationsToMove)
    11291 {
    11292  if(m_Blocks.empty())
    11293  {
    11294  return VK_SUCCESS;
    11295  }
    11296 
    11297  size_t srcBlockIndex = m_Blocks.size() - 1;
    11298  size_t srcAllocIndex = SIZE_MAX;
    11299  for(;;)
    11300  {
    11301  // 1. Find next allocation to move.
    11302  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    11303  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    11304  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    11305  {
    11306  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    11307  {
    11308  // Finished: no more allocations to process.
    11309  if(srcBlockIndex == 0)
    11310  {
    11311  return VK_SUCCESS;
    11312  }
    11313  else
    11314  {
    11315  --srcBlockIndex;
    11316  srcAllocIndex = SIZE_MAX;
    11317  }
    11318  }
    11319  else
    11320  {
    11321  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    11322  }
    11323  }
    11324 
    11325  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    11326  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    11327 
    11328  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    11329  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    11330  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    11331  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    11332 
    11333  // 2. Try to find new place for this allocation in preceding or current block.
    11334  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    11335  {
    11336  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    11337  VmaAllocationRequest dstAllocRequest;
    11338  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    11339  m_CurrentFrameIndex,
    11340  m_pBlockVector->GetFrameInUseCount(),
    11341  m_pBlockVector->GetBufferImageGranularity(),
    11342  size,
    11343  alignment,
    11344  false, // upperAddress
    11345  suballocType,
    11346  false, // canMakeOtherLost
    11348  &dstAllocRequest) &&
    11349  MoveMakesSense(
    11350  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    11351  {
    11352  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    11353 
    11354  // Reached limit on number of allocations or bytes to move.
    11355  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    11356  (m_BytesMoved + size > maxBytesToMove))
    11357  {
    11358  return VK_INCOMPLETE;
    11359  }
    11360 
    11361  void* pDstMappedData = VMA_NULL;
    11362  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    11363  if(res != VK_SUCCESS)
    11364  {
    11365  return res;
    11366  }
    11367 
    11368  void* pSrcMappedData = VMA_NULL;
    11369  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    11370  if(res != VK_SUCCESS)
    11371  {
    11372  return res;
    11373  }
    11374 
    11375  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    11376  memcpy(
    11377  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    11378  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    11379  static_cast<size_t>(size));
    11380 
    11381  if(VMA_DEBUG_MARGIN > 0)
    11382  {
    11383  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
    11384  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
    11385  }
    11386 
    11387  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    11388  dstAllocRequest,
    11389  suballocType,
    11390  size,
    11391  false, // upperAddress
    11392  allocInfo.m_hAllocation);
    11393  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    11394 
    11395  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    11396 
    11397  if(allocInfo.m_pChanged != VMA_NULL)
    11398  {
    11399  *allocInfo.m_pChanged = VK_TRUE;
    11400  }
    11401 
    11402  ++m_AllocationsMoved;
    11403  m_BytesMoved += size;
    11404 
    11405  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    11406 
    11407  break;
    11408  }
    11409  }
    11410 
    11411  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    11412 
    11413  if(srcAllocIndex > 0)
    11414  {
    11415  --srcAllocIndex;
    11416  }
    11417  else
    11418  {
    11419  if(srcBlockIndex > 0)
    11420  {
    11421  --srcBlockIndex;
    11422  srcAllocIndex = SIZE_MAX;
    11423  }
    11424  else
    11425  {
    11426  return VK_SUCCESS;
    11427  }
    11428  }
    11429  }
    11430 }
    11431 
    11432 VkResult VmaDefragmentator::Defragment(
    11433  VkDeviceSize maxBytesToMove,
    11434  uint32_t maxAllocationsToMove)
    11435 {
    11436  if(m_Allocations.empty())
    11437  {
    11438  return VK_SUCCESS;
    11439  }
    11440 
    11441  // Create block info for each block.
    11442  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    11443  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11444  {
    11445  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    11446  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    11447  m_Blocks.push_back(pBlockInfo);
    11448  }
    11449 
    11450  // Sort them by m_pBlock pointer value.
    11451  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    11452 
    11453  // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    11454  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    11455  {
    11456  AllocationInfo& allocInfo = m_Allocations[blockIndex];
    11457  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
    11458  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    11459  {
    11460  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
    11461  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    11462  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    11463  {
    11464  (*it)->m_Allocations.push_back(allocInfo);
    11465  }
    11466  else
    11467  {
    11468  VMA_ASSERT(0);
    11469  }
    11470  }
    11471  }
    11472  m_Allocations.clear();
    11473 
    11474  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11475  {
    11476  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    11477  pBlockInfo->CalcHasNonMovableAllocations();
    11478  pBlockInfo->SortAllocationsBySizeDescecnding();
    11479  }
    11480 
    11481  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    11482  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    11483 
    11484  // Execute defragmentation rounds (the main part).
    11485  VkResult result = VK_SUCCESS;
    11486  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    11487  {
    11488  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    11489  }
    11490 
    11491  // Unmap blocks that were mapped for defragmentation.
    11492  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11493  {
    11494  m_Blocks[blockIndex]->Unmap(m_hAllocator);
    11495  }
    11496 
    11497  return result;
    11498 }
    11499 
    11500 bool VmaDefragmentator::MoveMakesSense(
    11501  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11502  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11503 {
    11504  if(dstBlockIndex < srcBlockIndex)
    11505  {
    11506  return true;
    11507  }
    11508  if(dstBlockIndex > srcBlockIndex)
    11509  {
    11510  return false;
    11511  }
    11512  if(dstOffset < srcOffset)
    11513  {
    11514  return true;
    11515  }
    11516  return false;
    11517 }
    11518 
    11520 // VmaRecorder
    11521 
    11522 #if VMA_RECORDING_ENABLED
    11523 
    11524 VmaRecorder::VmaRecorder() :
    11525  m_UseMutex(true),
    11526  m_Flags(0),
    11527  m_File(VMA_NULL),
    11528  m_Freq(INT64_MAX),
    11529  m_StartCounter(INT64_MAX)
    11530 {
    11531 }
    11532 
    11533 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    11534 {
    11535  m_UseMutex = useMutex;
    11536  m_Flags = settings.flags;
    11537 
    11538  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    11539  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    11540 
    11541  // Open file for writing.
    11542  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    11543  if(err != 0)
    11544  {
    11545  return VK_ERROR_INITIALIZATION_FAILED;
    11546  }
    11547 
    11548  // Write header.
    11549  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    11550  fprintf(m_File, "%s\n", "1,4");
    11551 
    11552  return VK_SUCCESS;
    11553 }
    11554 
    11555 VmaRecorder::~VmaRecorder()
    11556 {
    11557  if(m_File != VMA_NULL)
    11558  {
    11559  fclose(m_File);
    11560  }
    11561 }
    11562 
    11563 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11564 {
    11565  CallParams callParams;
    11566  GetBasicParams(callParams);
    11567 
    11568  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11569  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11570  Flush();
    11571 }
    11572 
    11573 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11574 {
    11575  CallParams callParams;
    11576  GetBasicParams(callParams);
    11577 
    11578  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11579  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11580  Flush();
    11581 }
    11582 
    11583 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    11584 {
    11585  CallParams callParams;
    11586  GetBasicParams(callParams);
    11587 
    11588  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11589  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    11590  createInfo.memoryTypeIndex,
    11591  createInfo.flags,
    11592  createInfo.blockSize,
    11593  (uint64_t)createInfo.minBlockCount,
    11594  (uint64_t)createInfo.maxBlockCount,
    11595  createInfo.frameInUseCount,
    11596  pool);
    11597  Flush();
    11598 }
    11599 
    11600 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11601 {
    11602  CallParams callParams;
    11603  GetBasicParams(callParams);
    11604 
    11605  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11606  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11607  pool);
    11608  Flush();
    11609 }
    11610 
    11611 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    11612  const VkMemoryRequirements& vkMemReq,
    11613  const VmaAllocationCreateInfo& createInfo,
    11614  VmaAllocation allocation)
    11615 {
    11616  CallParams callParams;
    11617  GetBasicParams(callParams);
    11618 
    11619  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11620  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11621  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11622  vkMemReq.size,
    11623  vkMemReq.alignment,
    11624  vkMemReq.memoryTypeBits,
    11625  createInfo.flags,
    11626  createInfo.usage,
    11627  createInfo.requiredFlags,
    11628  createInfo.preferredFlags,
    11629  createInfo.memoryTypeBits,
    11630  createInfo.pool,
    11631  allocation,
    11632  userDataStr.GetString());
    11633  Flush();
    11634 }
    11635 
    11636 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    11637  const VkMemoryRequirements& vkMemReq,
    11638  bool requiresDedicatedAllocation,
    11639  bool prefersDedicatedAllocation,
    11640  const VmaAllocationCreateInfo& createInfo,
    11641  VmaAllocation allocation)
    11642 {
    11643  CallParams callParams;
    11644  GetBasicParams(callParams);
    11645 
    11646  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11647  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11648  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11649  vkMemReq.size,
    11650  vkMemReq.alignment,
    11651  vkMemReq.memoryTypeBits,
    11652  requiresDedicatedAllocation ? 1 : 0,
    11653  prefersDedicatedAllocation ? 1 : 0,
    11654  createInfo.flags,
    11655  createInfo.usage,
    11656  createInfo.requiredFlags,
    11657  createInfo.preferredFlags,
    11658  createInfo.memoryTypeBits,
    11659  createInfo.pool,
    11660  allocation,
    11661  userDataStr.GetString());
    11662  Flush();
    11663 }
    11664 
    11665 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    11666  const VkMemoryRequirements& vkMemReq,
    11667  bool requiresDedicatedAllocation,
    11668  bool prefersDedicatedAllocation,
    11669  const VmaAllocationCreateInfo& createInfo,
    11670  VmaAllocation allocation)
    11671 {
    11672  CallParams callParams;
    11673  GetBasicParams(callParams);
    11674 
    11675  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11676  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11677  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11678  vkMemReq.size,
    11679  vkMemReq.alignment,
    11680  vkMemReq.memoryTypeBits,
    11681  requiresDedicatedAllocation ? 1 : 0,
    11682  prefersDedicatedAllocation ? 1 : 0,
    11683  createInfo.flags,
    11684  createInfo.usage,
    11685  createInfo.requiredFlags,
    11686  createInfo.preferredFlags,
    11687  createInfo.memoryTypeBits,
    11688  createInfo.pool,
    11689  allocation,
    11690  userDataStr.GetString());
    11691  Flush();
    11692 }
    11693 
    11694 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11695  VmaAllocation allocation)
    11696 {
    11697  CallParams callParams;
    11698  GetBasicParams(callParams);
    11699 
    11700  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11701  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11702  allocation);
    11703  Flush();
    11704 }
    11705 
    11706 void VmaRecorder::RecordResizeAllocation(
    11707  uint32_t frameIndex,
    11708  VmaAllocation allocation,
    11709  VkDeviceSize newSize)
    11710 {
    11711  CallParams callParams;
    11712  GetBasicParams(callParams);
    11713 
    11714  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11715  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11716  allocation, newSize);
    11717  Flush();
    11718 }
    11719 
    11720 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11721  VmaAllocation allocation,
    11722  const void* pUserData)
    11723 {
    11724  CallParams callParams;
    11725  GetBasicParams(callParams);
    11726 
    11727  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11728  UserDataString userDataStr(
    11729  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11730  pUserData);
    11731  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11732  allocation,
    11733  userDataStr.GetString());
    11734  Flush();
    11735 }
    11736 
    11737 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11738  VmaAllocation allocation)
    11739 {
    11740  CallParams callParams;
    11741  GetBasicParams(callParams);
    11742 
    11743  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11744  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11745  allocation);
    11746  Flush();
    11747 }
    11748 
    11749 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11750  VmaAllocation allocation)
    11751 {
    11752  CallParams callParams;
    11753  GetBasicParams(callParams);
    11754 
    11755  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11756  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11757  allocation);
    11758  Flush();
    11759 }
    11760 
    11761 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11762  VmaAllocation allocation)
    11763 {
    11764  CallParams callParams;
    11765  GetBasicParams(callParams);
    11766 
    11767  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11768  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11769  allocation);
    11770  Flush();
    11771 }
    11772 
    11773 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11774  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11775 {
    11776  CallParams callParams;
    11777  GetBasicParams(callParams);
    11778 
    11779  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11780  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11781  allocation,
    11782  offset,
    11783  size);
    11784  Flush();
    11785 }
    11786 
    11787 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11788  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11789 {
    11790  CallParams callParams;
    11791  GetBasicParams(callParams);
    11792 
    11793  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11794  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11795  allocation,
    11796  offset,
    11797  size);
    11798  Flush();
    11799 }
    11800 
    11801 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    11802  const VkBufferCreateInfo& bufCreateInfo,
    11803  const VmaAllocationCreateInfo& allocCreateInfo,
    11804  VmaAllocation allocation)
    11805 {
    11806  CallParams callParams;
    11807  GetBasicParams(callParams);
    11808 
    11809  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11810  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11811  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11812  bufCreateInfo.flags,
    11813  bufCreateInfo.size,
    11814  bufCreateInfo.usage,
    11815  bufCreateInfo.sharingMode,
    11816  allocCreateInfo.flags,
    11817  allocCreateInfo.usage,
    11818  allocCreateInfo.requiredFlags,
    11819  allocCreateInfo.preferredFlags,
    11820  allocCreateInfo.memoryTypeBits,
    11821  allocCreateInfo.pool,
    11822  allocation,
    11823  userDataStr.GetString());
    11824  Flush();
    11825 }
    11826 
    11827 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    11828  const VkImageCreateInfo& imageCreateInfo,
    11829  const VmaAllocationCreateInfo& allocCreateInfo,
    11830  VmaAllocation allocation)
    11831 {
    11832  CallParams callParams;
    11833  GetBasicParams(callParams);
    11834 
    11835  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11836  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11837  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11838  imageCreateInfo.flags,
    11839  imageCreateInfo.imageType,
    11840  imageCreateInfo.format,
    11841  imageCreateInfo.extent.width,
    11842  imageCreateInfo.extent.height,
    11843  imageCreateInfo.extent.depth,
    11844  imageCreateInfo.mipLevels,
    11845  imageCreateInfo.arrayLayers,
    11846  imageCreateInfo.samples,
    11847  imageCreateInfo.tiling,
    11848  imageCreateInfo.usage,
    11849  imageCreateInfo.sharingMode,
    11850  imageCreateInfo.initialLayout,
    11851  allocCreateInfo.flags,
    11852  allocCreateInfo.usage,
    11853  allocCreateInfo.requiredFlags,
    11854  allocCreateInfo.preferredFlags,
    11855  allocCreateInfo.memoryTypeBits,
    11856  allocCreateInfo.pool,
    11857  allocation,
    11858  userDataStr.GetString());
    11859  Flush();
    11860 }
    11861 
    11862 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11863  VmaAllocation allocation)
    11864 {
    11865  CallParams callParams;
    11866  GetBasicParams(callParams);
    11867 
    11868  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11869  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11870  allocation);
    11871  Flush();
    11872 }
    11873 
    11874 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11875  VmaAllocation allocation)
    11876 {
    11877  CallParams callParams;
    11878  GetBasicParams(callParams);
    11879 
    11880  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11881  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11882  allocation);
    11883  Flush();
    11884 }
    11885 
    11886 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11887  VmaAllocation allocation)
    11888 {
    11889  CallParams callParams;
    11890  GetBasicParams(callParams);
    11891 
    11892  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11893  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11894  allocation);
    11895  Flush();
    11896 }
    11897 
    11898 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11899  VmaAllocation allocation)
    11900 {
    11901  CallParams callParams;
    11902  GetBasicParams(callParams);
    11903 
    11904  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11905  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11906  allocation);
    11907  Flush();
    11908 }
    11909 
    11910 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11911  VmaPool pool)
    11912 {
    11913  CallParams callParams;
    11914  GetBasicParams(callParams);
    11915 
    11916  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11917  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11918  pool);
    11919  Flush();
    11920 }
    11921 
    11922 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11923 {
    11924  if(pUserData != VMA_NULL)
    11925  {
    11926  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11927  {
    11928  m_Str = (const char*)pUserData;
    11929  }
    11930  else
    11931  {
    11932  sprintf_s(m_PtrStr, "%p", pUserData);
    11933  m_Str = m_PtrStr;
    11934  }
    11935  }
    11936  else
    11937  {
    11938  m_Str = "";
    11939  }
    11940 }
    11941 
// Writes the "Config,Begin" .. "Config,End" section at the start of the
// recording file: physical device identity, allocation-relevant limits, the
// full memory heap/type layout, enabled extensions, and compile-time VMA
// macro values. NOTE(review): presumably consumed by the VmaReplay tool to
// validate playback compatibility — confirm against VmaReplay sources.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Physical device identification.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that influence allocation size/alignment decisions.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Full memory heap and memory type layout.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    // Extensions enabled on the device at allocator creation.
    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this particular VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11987 
    11988 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11989 {
    11990  outParams.threadId = GetCurrentThreadId();
    11991 
    11992  LARGE_INTEGER counter;
    11993  QueryPerformanceCounter(&counter);
    11994  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11995 }
    11996 
    11997 void VmaRecorder::Flush()
    11998 {
    11999  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    12000  {
    12001  fflush(m_File);
    12002  }
    12003 }
    12004 
    12005 #endif // #if VMA_RECORDING_ENABLED
    12006 
////////////////////////////////////////////////////////////////////////////////
// VmaAllocator_T
    12009 
    12010 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    12011  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    12012  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    12013  m_hDevice(pCreateInfo->device),
    12014  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    12015  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    12016  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    12017  m_PreferredLargeHeapBlockSize(0),
    12018  m_PhysicalDevice(pCreateInfo->physicalDevice),
    12019  m_CurrentFrameIndex(0),
    12020  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    12021  m_NextPoolId(0)
    12023  ,m_pRecorder(VMA_NULL)
    12024 #endif
    12025 {
    12026  if(VMA_DEBUG_DETECT_CORRUPTION)
    12027  {
    12028  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    12029  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    12030  }
    12031 
    12032  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    12033 
    12034 #if !(VMA_DEDICATED_ALLOCATION)
    12036  {
    12037  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    12038  }
    12039 #endif
    12040 
    12041  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    12042  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    12043  memset(&m_MemProps, 0, sizeof(m_MemProps));
    12044 
    12045  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    12046  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    12047 
    12048  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12049  {
    12050  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    12051  }
    12052 
    12053  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    12054  {
    12055  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    12056  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    12057  }
    12058 
    12059  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    12060 
    12061  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    12062  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    12063 
    12064  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    12065  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    12066  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    12067  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    12068 
    12069  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    12070  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    12071 
    12072  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    12073  {
    12074  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    12075  {
    12076  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    12077  if(limit != VK_WHOLE_SIZE)
    12078  {
    12079  m_HeapSizeLimit[heapIndex] = limit;
    12080  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    12081  {
    12082  m_MemProps.memoryHeaps[heapIndex].size = limit;
    12083  }
    12084  }
    12085  }
    12086  }
    12087 
    12088  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12089  {
    12090  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    12091 
    12092  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    12093  this,
    12094  memTypeIndex,
    12095  preferredBlockSize,
    12096  0,
    12097  SIZE_MAX,
    12098  GetBufferImageGranularity(),
    12099  pCreateInfo->frameInUseCount,
    12100  false, // isCustomPool
    12101  false, // explicitBlockSize
    12102  false); // linearAlgorithm
    12103  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12104  // becase minBlockCount is 0.
    12105  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12106 
    12107  }
    12108 }
    12109 
    12110 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    12111 {
    12112  VkResult res = VK_SUCCESS;
    12113 
    12114  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    12115  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    12116  {
    12117 #if VMA_RECORDING_ENABLED
    12118  m_pRecorder = vma_new(this, VmaRecorder)();
    12119  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    12120  if(res != VK_SUCCESS)
    12121  {
    12122  return res;
    12123  }
    12124  m_pRecorder->WriteConfiguration(
    12125  m_PhysicalDeviceProperties,
    12126  m_MemProps,
    12127  m_UseKhrDedicatedAllocation);
    12128  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    12129 #else
    12130  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    12131  return VK_ERROR_FEATURE_NOT_PRESENT;
    12132 #endif
    12133  }
    12134 
    12135  return res;
    12136 }
    12137 
    12138 VmaAllocator_T::~VmaAllocator_T()
    12139 {
    12140 #if VMA_RECORDING_ENABLED
    12141  if(m_pRecorder != VMA_NULL)
    12142  {
    12143  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    12144  vma_delete(this, m_pRecorder);
    12145  }
    12146 #endif
    12147 
    12148  VMA_ASSERT(m_Pools.empty());
    12149 
    12150  for(size_t i = GetMemoryTypeCount(); i--; )
    12151  {
    12152  vma_delete(this, m_pDedicatedAllocations[i]);
    12153  vma_delete(this, m_pBlockVectors[i]);
    12154  }
    12155 }
    12156 
// Populates m_VulkanFunctions. Sources, in order of application:
// 1. Statically linked entry points (when VMA_STATIC_VULKAN_FUNCTIONS == 1).
// 2. Pointers supplied by the user in pVulkanFunctions (may be null), which
//    override any value already set.
// All pointers required for operation are asserted non-null at the end.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    // Take addresses of the statically linked Vulkan functions.
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // Extension functions are not statically exported; fetch them from the device.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-supplied pointer, skipping nulls so statically resolved
// pointers are not clobbered.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12242 
    12243 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12244 {
    12245  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12246  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12247  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12248  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12249 }
    12250 
    12251 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12252  VkDeviceSize size,
    12253  VkDeviceSize alignment,
    12254  bool dedicatedAllocation,
    12255  VkBuffer dedicatedBuffer,
    12256  VkImage dedicatedImage,
    12257  const VmaAllocationCreateInfo& createInfo,
    12258  uint32_t memTypeIndex,
    12259  VmaSuballocationType suballocType,
    12260  VmaAllocation* pAllocation)
    12261 {
    12262  VMA_ASSERT(pAllocation != VMA_NULL);
    12263  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12264 
    12265  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12266 
    12267  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12268  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12269  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12270  {
    12271  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12272  }
    12273 
    12274  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12275  VMA_ASSERT(blockVector);
    12276 
    12277  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12278  bool preferDedicatedMemory =
    12279  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12280  dedicatedAllocation ||
    12281  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12282  size > preferredBlockSize / 2;
    12283 
    12284  if(preferDedicatedMemory &&
    12285  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12286  finalCreateInfo.pool == VK_NULL_HANDLE)
    12287  {
    12289  }
    12290 
    12291  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12292  {
    12293  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12294  {
    12295  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12296  }
    12297  else
    12298  {
    12299  return AllocateDedicatedMemory(
    12300  size,
    12301  suballocType,
    12302  memTypeIndex,
    12303  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12304  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12305  finalCreateInfo.pUserData,
    12306  dedicatedBuffer,
    12307  dedicatedImage,
    12308  pAllocation);
    12309  }
    12310  }
    12311  else
    12312  {
    12313  VkResult res = blockVector->Allocate(
    12314  VK_NULL_HANDLE, // hCurrentPool
    12315  m_CurrentFrameIndex.load(),
    12316  size,
    12317  alignment,
    12318  finalCreateInfo,
    12319  suballocType,
    12320  pAllocation);
    12321  if(res == VK_SUCCESS)
    12322  {
    12323  return res;
    12324  }
    12325 
    12326  // 5. Try dedicated memory.
    12327  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12328  {
    12329  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12330  }
    12331  else
    12332  {
    12333  res = AllocateDedicatedMemory(
    12334  size,
    12335  suballocType,
    12336  memTypeIndex,
    12337  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12338  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12339  finalCreateInfo.pUserData,
    12340  dedicatedBuffer,
    12341  dedicatedImage,
    12342  pAllocation);
    12343  if(res == VK_SUCCESS)
    12344  {
    12345  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12346  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12347  return VK_SUCCESS;
    12348  }
    12349  else
    12350  {
    12351  // Everything failed: Return error code.
    12352  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12353  return res;
    12354  }
    12355  }
    12356  }
    12357 }
    12358 
// Allocates a whole VkDeviceMemory object for a single allocation: optionally
// chains VkMemoryDedicatedAllocateInfoKHR (when the KHR_dedicated_allocation
// path is active and a buffer/image handle was provided), optionally maps it
// persistently, then registers the new allocation in m_pDedicatedAllocations.
// On vkMapMemory failure the freshly allocated memory is released again.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Dedicate the memory to the buffer or image (at most one may be set).
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map the whole range if requested (VMA_ALLOCATION_CREATE_MAPPED_BIT).
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            // Mapping failed: release the memory we just allocated.
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12441 
    12442 void VmaAllocator_T::GetBufferMemoryRequirements(
    12443  VkBuffer hBuffer,
    12444  VkMemoryRequirements& memReq,
    12445  bool& requiresDedicatedAllocation,
    12446  bool& prefersDedicatedAllocation) const
    12447 {
    12448 #if VMA_DEDICATED_ALLOCATION
    12449  if(m_UseKhrDedicatedAllocation)
    12450  {
    12451  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12452  memReqInfo.buffer = hBuffer;
    12453 
    12454  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12455 
    12456  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12457  memReq2.pNext = &memDedicatedReq;
    12458 
    12459  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12460 
    12461  memReq = memReq2.memoryRequirements;
    12462  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12463  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12464  }
    12465  else
    12466 #endif // #if VMA_DEDICATED_ALLOCATION
    12467  {
    12468  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12469  requiresDedicatedAllocation = false;
    12470  prefersDedicatedAllocation = false;
    12471  }
    12472 }
    12473 
    12474 void VmaAllocator_T::GetImageMemoryRequirements(
    12475  VkImage hImage,
    12476  VkMemoryRequirements& memReq,
    12477  bool& requiresDedicatedAllocation,
    12478  bool& prefersDedicatedAllocation) const
    12479 {
    12480 #if VMA_DEDICATED_ALLOCATION
    12481  if(m_UseKhrDedicatedAllocation)
    12482  {
    12483  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12484  memReqInfo.image = hImage;
    12485 
    12486  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12487 
    12488  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12489  memReq2.pNext = &memDedicatedReq;
    12490 
    12491  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12492 
    12493  memReq = memReq2.memoryRequirements;
    12494  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12495  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12496  }
    12497  else
    12498 #endif // #if VMA_DEDICATED_ALLOCATION
    12499  {
    12500  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12501  requiresDedicatedAllocation = false;
    12502  prefersDedicatedAllocation = false;
    12503  }
    12504 }
    12505 
    12506 VkResult VmaAllocator_T::AllocateMemory(
    12507  const VkMemoryRequirements& vkMemReq,
    12508  bool requiresDedicatedAllocation,
    12509  bool prefersDedicatedAllocation,
    12510  VkBuffer dedicatedBuffer,
    12511  VkImage dedicatedImage,
    12512  const VmaAllocationCreateInfo& createInfo,
    12513  VmaSuballocationType suballocType,
    12514  VmaAllocation* pAllocation)
    12515 {
    12516  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12517 
    12518  if(vkMemReq.size == 0)
    12519  {
    12520  return VK_ERROR_VALIDATION_FAILED_EXT;
    12521  }
    12522  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12523  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12524  {
    12525  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12526  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12527  }
    12528  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12530  {
    12531  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12532  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12533  }
    12534  if(requiresDedicatedAllocation)
    12535  {
    12536  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12537  {
    12538  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12539  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12540  }
    12541  if(createInfo.pool != VK_NULL_HANDLE)
    12542  {
    12543  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12544  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12545  }
    12546  }
    12547  if((createInfo.pool != VK_NULL_HANDLE) &&
    12548  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12549  {
    12550  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12551  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12552  }
    12553 
    12554  if(createInfo.pool != VK_NULL_HANDLE)
    12555  {
    12556  const VkDeviceSize alignmentForPool = VMA_MAX(
    12557  vkMemReq.alignment,
    12558  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12559  return createInfo.pool->m_BlockVector.Allocate(
    12560  createInfo.pool,
    12561  m_CurrentFrameIndex.load(),
    12562  vkMemReq.size,
    12563  alignmentForPool,
    12564  createInfo,
    12565  suballocType,
    12566  pAllocation);
    12567  }
    12568  else
    12569  {
    12570  // Bit mask of memory Vulkan types acceptable for this allocation.
    12571  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12572  uint32_t memTypeIndex = UINT32_MAX;
    12573  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12574  if(res == VK_SUCCESS)
    12575  {
    12576  VkDeviceSize alignmentForMemType = VMA_MAX(
    12577  vkMemReq.alignment,
    12578  GetMemoryTypeMinAlignment(memTypeIndex));
    12579 
    12580  res = AllocateMemoryOfType(
    12581  vkMemReq.size,
    12582  alignmentForMemType,
    12583  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12584  dedicatedBuffer,
    12585  dedicatedImage,
    12586  createInfo,
    12587  memTypeIndex,
    12588  suballocType,
    12589  pAllocation);
    12590  // Succeeded on first try.
    12591  if(res == VK_SUCCESS)
    12592  {
    12593  return res;
    12594  }
    12595  // Allocation from this memory type failed. Try other compatible memory types.
    12596  else
    12597  {
    12598  for(;;)
    12599  {
    12600  // Remove old memTypeIndex from list of possibilities.
    12601  memoryTypeBits &= ~(1u << memTypeIndex);
    12602  // Find alternative memTypeIndex.
    12603  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12604  if(res == VK_SUCCESS)
    12605  {
    12606  alignmentForMemType = VMA_MAX(
    12607  vkMemReq.alignment,
    12608  GetMemoryTypeMinAlignment(memTypeIndex));
    12609 
    12610  res = AllocateMemoryOfType(
    12611  vkMemReq.size,
    12612  alignmentForMemType,
    12613  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12614  dedicatedBuffer,
    12615  dedicatedImage,
    12616  createInfo,
    12617  memTypeIndex,
    12618  suballocType,
    12619  pAllocation);
    12620  // Allocation from this alternative memory type succeeded.
    12621  if(res == VK_SUCCESS)
    12622  {
    12623  return res;
    12624  }
    12625  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12626  }
    12627  // No other matching memory type index could be found.
    12628  else
    12629  {
    12630  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12631  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12632  }
    12633  }
    12634  }
    12635  }
    12636  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12637  else
    12638  return res;
    12639  }
    12640 }
    12641 
    12642 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12643 {
    12644  VMA_ASSERT(allocation);
    12645 
    12646  if(TouchAllocation(allocation))
    12647  {
    12648  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12649  {
    12650  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12651  }
    12652 
    12653  switch(allocation->GetType())
    12654  {
    12655  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12656  {
    12657  VmaBlockVector* pBlockVector = VMA_NULL;
    12658  VmaPool hPool = allocation->GetPool();
    12659  if(hPool != VK_NULL_HANDLE)
    12660  {
    12661  pBlockVector = &hPool->m_BlockVector;
    12662  }
    12663  else
    12664  {
    12665  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12666  pBlockVector = m_pBlockVectors[memTypeIndex];
    12667  }
    12668  pBlockVector->Free(allocation);
    12669  }
    12670  break;
    12671  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12672  FreeDedicatedMemory(allocation);
    12673  break;
    12674  default:
    12675  VMA_ASSERT(0);
    12676  }
    12677  }
    12678 
    12679  allocation->SetUserData(this, VMA_NULL);
    12680  vma_delete(this, allocation);
    12681 }
    12682 
    12683 VkResult VmaAllocator_T::ResizeAllocation(
    12684  const VmaAllocation alloc,
    12685  VkDeviceSize newSize)
    12686 {
    12687  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    12688  {
    12689  return VK_ERROR_VALIDATION_FAILED_EXT;
    12690  }
    12691  if(newSize == alloc->GetSize())
    12692  {
    12693  return VK_SUCCESS;
    12694  }
    12695 
    12696  switch(alloc->GetType())
    12697  {
    12698  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12699  return VK_ERROR_FEATURE_NOT_PRESENT;
    12700  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12701  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    12702  {
    12703  alloc->ChangeSize(newSize);
    12704  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    12705  return VK_SUCCESS;
    12706  }
    12707  else
    12708  {
    12709  return VK_ERROR_OUT_OF_POOL_MEMORY;
    12710  }
    12711  default:
    12712  VMA_ASSERT(0);
    12713  return VK_ERROR_VALIDATION_FAILED_EXT;
    12714  }
    12715 }
    12716 
    12717 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12718 {
    12719  // Initialize.
    12720  InitStatInfo(pStats->total);
    12721  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12722  InitStatInfo(pStats->memoryType[i]);
    12723  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12724  InitStatInfo(pStats->memoryHeap[i]);
    12725 
    12726  // Process default pools.
    12727  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12728  {
    12729  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12730  VMA_ASSERT(pBlockVector);
    12731  pBlockVector->AddStats(pStats);
    12732  }
    12733 
    12734  // Process custom pools.
    12735  {
    12736  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12737  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12738  {
    12739  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12740  }
    12741  }
    12742 
    12743  // Process dedicated allocations.
    12744  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12745  {
    12746  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12747  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12748  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12749  VMA_ASSERT(pDedicatedAllocVector);
    12750  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12751  {
    12752  VmaStatInfo allocationStatInfo;
    12753  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12754  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12755  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12756  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12757  }
    12758  }
    12759 
    12760  // Postprocess.
    12761  VmaPostprocessCalcStatInfo(pStats->total);
    12762  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12763  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12764  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12765  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12766 }
    12767 
// PCI vendor ID of AMD: 0x1002 (decimal 4098).
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12769 
// Defragments the given allocations in three phases: (1) dispatch eligible
// allocations to per-block-vector defragmentators, (2) run defragmentation on
// default block vectors and custom pools, (3) destroy all defragmentators.
// Statement order matters throughout; code left unmodified, comments only.
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    // Zero-initialize the optional output array/struct up front.
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    // Held for the whole operation so the set of custom pools cannot change
    // while their block vectors are being processed.
    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            // Lazily create the block vector's defragmentator and register
            // this allocation (with its optional "changed" output slot).
            if(pAllocBlockVector != VMA_NULL)
            {
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    // With no VmaDefragmentationInfo provided, move limits are unbounded.
    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory. Stops at the first failing block vector.
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.
    // Torn down unconditionally (even after a failure above), in reverse order.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
    12890 
// Fills *pAllocationInfo with the current parameters of the given allocation.
// For allocations that can become lost, this also advances the allocation's
// last-use frame index to the current frame (the same side effect as
// TouchAllocation), using a lock-free retry loop.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report empty info; only size and user data remain meaningful.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Allocation is marked as used in the current frame: safe to read parameters.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use index to the current frame,
                // then loop to re-evaluate the (possibly changed) state.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds keep the last-use frame index up to date even for
        // allocations that cannot become lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12962 
// Returns true if the allocation still has valid backing memory (is not lost).
// Side effect: advances the allocation's last-use frame index to the current
// frame, which is what keeps a CAN_BECOME_LOST allocation alive.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use index to the current frame,
                // then loop to re-evaluate the (possibly changed) state.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds keep the last-use frame index up to date even for
        // allocations that cannot become lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Allocations that cannot become lost are always valid.
        return true;
    }
}
    13014 
    13015 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    13016 {
    13017  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    13018 
    13019  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    13020 
    13021  if(newCreateInfo.maxBlockCount == 0)
    13022  {
    13023  newCreateInfo.maxBlockCount = SIZE_MAX;
    13024  }
    13025  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    13026  {
    13027  return VK_ERROR_INITIALIZATION_FAILED;
    13028  }
    13029 
    13030  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    13031 
    13032  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    13033 
    13034  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    13035  if(res != VK_SUCCESS)
    13036  {
    13037  vma_delete(this, *pPool);
    13038  *pPool = VMA_NULL;
    13039  return res;
    13040  }
    13041 
    13042  // Add to m_Pools.
    13043  {
    13044  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13045  (*pPool)->SetId(m_NextPoolId++);
    13046  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    13047  }
    13048 
    13049  return VK_SUCCESS;
    13050 }
    13051 
    13052 void VmaAllocator_T::DestroyPool(VmaPool pool)
    13053 {
    13054  // Remove from m_Pools.
    13055  {
    13056  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13057  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13058  VMA_ASSERT(success && "Pool not found in Allocator.");
    13059  }
    13060 
    13061  vma_delete(this, pool);
    13062 }
    13063 
// Fills *pPoolStats with statistics of the given custom pool, delegating to
// the pool's block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    13068 
// Atomically stores the current frame index, which drives the lost-allocation
// logic (see GetAllocationInfo / TouchAllocation).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    13073 
// Marks eligible allocations in the given pool as lost as of the current frame
// index, delegating to the pool's block vector. pLostAllocationCount (optional)
// receives the number of allocations made lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    13082 
// Runs corruption detection on a single custom pool by delegating to its
// block vector's CheckCorruption.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    13087 
    13088 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    13089 {
    13090  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    13091 
    13092  // Process default pools.
    13093  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13094  {
    13095  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    13096  {
    13097  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    13098  VMA_ASSERT(pBlockVector);
    13099  VkResult localRes = pBlockVector->CheckCorruption();
    13100  switch(localRes)
    13101  {
    13102  case VK_ERROR_FEATURE_NOT_PRESENT:
    13103  break;
    13104  case VK_SUCCESS:
    13105  finalRes = VK_SUCCESS;
    13106  break;
    13107  default:
    13108  return localRes;
    13109  }
    13110  }
    13111  }
    13112 
    13113  // Process custom pools.
    13114  {
    13115  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13116  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    13117  {
    13118  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    13119  {
    13120  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    13121  switch(localRes)
    13122  {
    13123  case VK_ERROR_FEATURE_NOT_PRESENT:
    13124  break;
    13125  case VK_SUCCESS:
    13126  finalRes = VK_SUCCESS;
    13127  break;
    13128  default:
    13129  return localRes;
    13130  }
    13131  }
    13132  }
    13133  }
    13134 
    13135  return finalRes;
    13136 }
    13137 
// Creates a dummy allocation that starts (and stays) in the lost state —
// its frame index is VMA_FRAME_INDEX_LOST and InitLost() finishes the setup.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    13143 
// Calls vkAllocateMemory, enforcing the optional user-defined per-heap size
// limit (m_HeapSizeLimit) and notifying the user's allocate callback on
// success. VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit".
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // A heap limit is in effect: charge the request against the remaining
        // budget while holding the limit mutex.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // The request alone would exceed the configured heap limit.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Inform the user callback about the successful device memory allocation.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
    13177 
// Frees VkDeviceMemory previously obtained from AllocateVulkanMemory and
// returns its size to the per-heap budget if a heap size limit is in effect.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Notify the user callback before the memory is actually freed.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Return the freed size to the heap budget (VK_WHOLE_SIZE means "no limit").
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    13194 
    13195 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    13196 {
    13197  if(hAllocation->CanBecomeLost())
    13198  {
    13199  return VK_ERROR_MEMORY_MAP_FAILED;
    13200  }
    13201 
    13202  switch(hAllocation->GetType())
    13203  {
    13204  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13205  {
    13206  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13207  char *pBytes = VMA_NULL;
    13208  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    13209  if(res == VK_SUCCESS)
    13210  {
    13211  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    13212  hAllocation->BlockAllocMap();
    13213  }
    13214  return res;
    13215  }
    13216  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13217  return hAllocation->DedicatedAllocMap(this, ppData);
    13218  default:
    13219  VMA_ASSERT(0);
    13220  return VK_ERROR_MEMORY_MAP_FAILED;
    13221  }
    13222 }
    13223 
    13224 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    13225 {
    13226  switch(hAllocation->GetType())
    13227  {
    13228  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13229  {
    13230  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13231  hAllocation->BlockAllocUnmap();
    13232  pBlock->Unmap(this, 1);
    13233  }
    13234  break;
    13235  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13236  hAllocation->DedicatedAllocUnmap(this);
    13237  break;
    13238  default:
    13239  VMA_ASSERT(0);
    13240  }
    13241 }
    13242 
    13243 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13244 {
    13245  VkResult res = VK_SUCCESS;
    13246  switch(hAllocation->GetType())
    13247  {
    13248  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13249  res = GetVulkanFunctions().vkBindBufferMemory(
    13250  m_hDevice,
    13251  hBuffer,
    13252  hAllocation->GetMemory(),
    13253  0); //memoryOffset
    13254  break;
    13255  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13256  {
    13257  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13258  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13259  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13260  break;
    13261  }
    13262  default:
    13263  VMA_ASSERT(0);
    13264  }
    13265  return res;
    13266 }
    13267 
    13268 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13269 {
    13270  VkResult res = VK_SUCCESS;
    13271  switch(hAllocation->GetType())
    13272  {
    13273  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13274  res = GetVulkanFunctions().vkBindImageMemory(
    13275  m_hDevice,
    13276  hImage,
    13277  hAllocation->GetMemory(),
    13278  0); //memoryOffset
    13279  break;
    13280  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13281  {
    13282  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13283  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13284  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13285  break;
    13286  }
    13287  default:
    13288  VMA_ASSERT(0);
    13289  }
    13290  return res;
    13291 }
    13292 
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    // Flushes or invalidates the host cache for a range of this allocation.
    // offset/size are relative to the allocation; size may be VK_WHOLE_SIZE.
    // A no-op for HOST_COHERENT memory types or when size == 0.
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Vulkan requires mapped-range offset/size to be multiples of
            // nonCoherentAtomSize: round the range outward, clamped so it does
            // not run past the end of the allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            // Align the requested range outward to nonCoherentAtomSize,
            // expressed relative to the allocation's start.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // vkFlush/Invalidate operate on the VkDeviceMemory block, so shift
            // by the allocation's offset inside the block and clamp to the
            // block's total size.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        // Dispatch the single prepared range to the requested cache operation.
        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13368 
    13369 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13370 {
    13371  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13372 
    13373  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13374  {
    13375  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13376  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13377  VMA_ASSERT(pDedicatedAllocations);
    13378  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13379  VMA_ASSERT(success);
    13380  }
    13381 
    13382  VkDeviceMemory hMemory = allocation->GetMemory();
    13383 
    13384  /*
    13385  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13386  before vkFreeMemory.
    13387 
    13388  if(allocation->GetMappedData() != VMA_NULL)
    13389  {
    13390  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13391  }
    13392  */
    13393 
    13394  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13395 
    13396  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13397 }
    13398 
    13399 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13400 {
    13401  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13402  !hAllocation->CanBecomeLost() &&
    13403  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13404  {
    13405  void* pData = VMA_NULL;
    13406  VkResult res = Map(hAllocation, &pData);
    13407  if(res == VK_SUCCESS)
    13408  {
    13409  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13410  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13411  Unmap(hAllocation);
    13412  }
    13413  else
    13414  {
    13415  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13416  }
    13417  }
    13418 }
    13419 
    13420 #if VMA_STATS_STRING_ENABLED
    13421 
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Writes a detailed JSON description of memory owned by this allocator:
    // dedicated allocations, default per-memory-type block vectors, and custom
    // pools. Assumes the caller has already opened the enclosing JSON object.

    // Dedicated allocations, grouped per memory type. The "DedicatedAllocations"
    // object is opened lazily, only if at least one type has entries.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Key: "Type <index>", value: array of allocation descriptions.
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default pools: one block vector per memory type; "DefaultPools" object
    // is opened lazily like above, skipping empty vectors.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Each pool is keyed by its numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13507 
    13508 #endif // #if VMA_STATS_STRING_ENABLED
    13509 
    13511 // Public interface
    13512 
    13513 VkResult vmaCreateAllocator(
    13514  const VmaAllocatorCreateInfo* pCreateInfo,
    13515  VmaAllocator* pAllocator)
    13516 {
    13517  VMA_ASSERT(pCreateInfo && pAllocator);
    13518  VMA_DEBUG_LOG("vmaCreateAllocator");
    13519  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13520  return (*pAllocator)->Init(pCreateInfo);
    13521 }
    13522 
    13523 void vmaDestroyAllocator(
    13524  VmaAllocator allocator)
    13525 {
    13526  if(allocator != VK_NULL_HANDLE)
    13527  {
    13528  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13529  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13530  vma_delete(&allocationCallbacks, allocator);
    13531  }
    13532 }
    13533 
    13535  VmaAllocator allocator,
    13536  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13537 {
    13538  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13539  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13540 }
    13541 
    13543  VmaAllocator allocator,
    13544  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13545 {
    13546  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13547  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13548 }
    13549 
    13551  VmaAllocator allocator,
    13552  uint32_t memoryTypeIndex,
    13553  VkMemoryPropertyFlags* pFlags)
    13554 {
    13555  VMA_ASSERT(allocator && pFlags);
    13556  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13557  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13558 }
    13559 
    13561  VmaAllocator allocator,
    13562  uint32_t frameIndex)
    13563 {
    13564  VMA_ASSERT(allocator);
    13565  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13566 
    13567  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13568 
    13569  allocator->SetCurrentFrameIndex(frameIndex);
    13570 }
    13571 
    13572 void vmaCalculateStats(
    13573  VmaAllocator allocator,
    13574  VmaStats* pStats)
    13575 {
    13576  VMA_ASSERT(allocator && pStats);
    13577  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13578  allocator->CalculateStats(pStats);
    13579 }
    13580 
    13581 #if VMA_STATS_STRING_ENABLED
    13582 
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    // Builds a null-terminated JSON statistics string in *ppStatsString.
    // The string is allocated with the allocator's CPU callbacks and must be
    // released with vmaFreeStatsString(). When detailedMap is VK_TRUE, a full
    // per-allocation map is appended via PrintDetailedMap().
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope so the JSON writer flushes into sb before the string is copied out.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        // Grand total over all heaps and types.
        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap, with its memory types nested inside.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats only when the heap actually holds blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nest every memory type that belongs to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Translate the type's property flags to readable names.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a null-terminated buffer owned by the caller.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13690 
    13691 void vmaFreeStatsString(
    13692  VmaAllocator allocator,
    13693  char* pStatsString)
    13694 {
    13695  if(pStatsString != VMA_NULL)
    13696  {
    13697  VMA_ASSERT(allocator);
    13698  size_t len = strlen(pStatsString);
    13699  vma_delete_array(allocator, pStatsString, len + 1);
    13700  }
    13701 }
    13702 
    13703 #endif // #if VMA_STATS_STRING_ENABLED
    13704 
    13705 /*
    13706 This function is not protected by any mutex because it just reads immutable data.
    13707 */
    13708 VkResult vmaFindMemoryTypeIndex(
    13709  VmaAllocator allocator,
    13710  uint32_t memoryTypeBits,
    13711  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13712  uint32_t* pMemoryTypeIndex)
    13713 {
    13714  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13715  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13716  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13717 
    13718  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13719  {
    13720  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13721  }
    13722 
    13723  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13724  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13725 
    13726  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13727  if(mapped)
    13728  {
    13729  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13730  }
    13731 
    13732  // Convert usage to requiredFlags and preferredFlags.
    13733  switch(pAllocationCreateInfo->usage)
    13734  {
    13736  break;
    13738  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13739  {
    13740  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13741  }
    13742  break;
    13744  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13745  break;
    13747  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13748  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13749  {
    13750  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13751  }
    13752  break;
    13754  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13755  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13756  break;
    13757  default:
    13758  break;
    13759  }
    13760 
    13761  *pMemoryTypeIndex = UINT32_MAX;
    13762  uint32_t minCost = UINT32_MAX;
    13763  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13764  memTypeIndex < allocator->GetMemoryTypeCount();
    13765  ++memTypeIndex, memTypeBit <<= 1)
    13766  {
    13767  // This memory type is acceptable according to memoryTypeBits bitmask.
    13768  if((memTypeBit & memoryTypeBits) != 0)
    13769  {
    13770  const VkMemoryPropertyFlags currFlags =
    13771  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13772  // This memory type contains requiredFlags.
    13773  if((requiredFlags & ~currFlags) == 0)
    13774  {
    13775  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13776  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13777  // Remember memory type with lowest cost.
    13778  if(currCost < minCost)
    13779  {
    13780  *pMemoryTypeIndex = memTypeIndex;
    13781  if(currCost == 0)
    13782  {
    13783  return VK_SUCCESS;
    13784  }
    13785  minCost = currCost;
    13786  }
    13787  }
    13788  }
    13789  }
    13790  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13791 }
    13792 
    13794  VmaAllocator allocator,
    13795  const VkBufferCreateInfo* pBufferCreateInfo,
    13796  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13797  uint32_t* pMemoryTypeIndex)
    13798 {
    13799  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13800  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13801  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13802  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13803 
    13804  const VkDevice hDev = allocator->m_hDevice;
    13805  VkBuffer hBuffer = VK_NULL_HANDLE;
    13806  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13807  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13808  if(res == VK_SUCCESS)
    13809  {
    13810  VkMemoryRequirements memReq = {};
    13811  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13812  hDev, hBuffer, &memReq);
    13813 
    13814  res = vmaFindMemoryTypeIndex(
    13815  allocator,
    13816  memReq.memoryTypeBits,
    13817  pAllocationCreateInfo,
    13818  pMemoryTypeIndex);
    13819 
    13820  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13821  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13822  }
    13823  return res;
    13824 }
    13825 
    13827  VmaAllocator allocator,
    13828  const VkImageCreateInfo* pImageCreateInfo,
    13829  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13830  uint32_t* pMemoryTypeIndex)
    13831 {
    13832  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13833  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13834  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13835  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13836 
    13837  const VkDevice hDev = allocator->m_hDevice;
    13838  VkImage hImage = VK_NULL_HANDLE;
    13839  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13840  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13841  if(res == VK_SUCCESS)
    13842  {
    13843  VkMemoryRequirements memReq = {};
    13844  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13845  hDev, hImage, &memReq);
    13846 
    13847  res = vmaFindMemoryTypeIndex(
    13848  allocator,
    13849  memReq.memoryTypeBits,
    13850  pAllocationCreateInfo,
    13851  pMemoryTypeIndex);
    13852 
    13853  allocator->GetVulkanFunctions().vkDestroyImage(
    13854  hDev, hImage, allocator->GetAllocationCallbacks());
    13855  }
    13856  return res;
    13857 }
    13858 
    13859 VkResult vmaCreatePool(
    13860  VmaAllocator allocator,
    13861  const VmaPoolCreateInfo* pCreateInfo,
    13862  VmaPool* pPool)
    13863 {
    13864  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13865 
    13866  VMA_DEBUG_LOG("vmaCreatePool");
    13867 
    13868  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13869 
    13870  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13871 
    13872 #if VMA_RECORDING_ENABLED
    13873  if(allocator->GetRecorder() != VMA_NULL)
    13874  {
    13875  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13876  }
    13877 #endif
    13878 
    13879  return res;
    13880 }
    13881 
    13882 void vmaDestroyPool(
    13883  VmaAllocator allocator,
    13884  VmaPool pool)
    13885 {
    13886  VMA_ASSERT(allocator);
    13887 
    13888  if(pool == VK_NULL_HANDLE)
    13889  {
    13890  return;
    13891  }
    13892 
    13893  VMA_DEBUG_LOG("vmaDestroyPool");
    13894 
    13895  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13896 
    13897 #if VMA_RECORDING_ENABLED
    13898  if(allocator->GetRecorder() != VMA_NULL)
    13899  {
    13900  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13901  }
    13902 #endif
    13903 
    13904  allocator->DestroyPool(pool);
    13905 }
    13906 
    13907 void vmaGetPoolStats(
    13908  VmaAllocator allocator,
    13909  VmaPool pool,
    13910  VmaPoolStats* pPoolStats)
    13911 {
    13912  VMA_ASSERT(allocator && pool && pPoolStats);
    13913 
    13914  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13915 
    13916  allocator->GetPoolStats(pool, pPoolStats);
    13917 }
    13918 
    13920  VmaAllocator allocator,
    13921  VmaPool pool,
    13922  size_t* pLostAllocationCount)
    13923 {
    13924  VMA_ASSERT(allocator && pool);
    13925 
    13926  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13927 
    13928 #if VMA_RECORDING_ENABLED
    13929  if(allocator->GetRecorder() != VMA_NULL)
    13930  {
    13931  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13932  }
    13933 #endif
    13934 
    13935  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13936 }
    13937 
    13938 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13939 {
    13940  VMA_ASSERT(allocator && pool);
    13941 
    13942  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13943 
    13944  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13945 
    13946  return allocator->CheckPoolCorruption(pool);
    13947 }
    13948 
    13949 VkResult vmaAllocateMemory(
    13950  VmaAllocator allocator,
    13951  const VkMemoryRequirements* pVkMemoryRequirements,
    13952  const VmaAllocationCreateInfo* pCreateInfo,
    13953  VmaAllocation* pAllocation,
    13954  VmaAllocationInfo* pAllocationInfo)
    13955 {
    13956  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13957 
    13958  VMA_DEBUG_LOG("vmaAllocateMemory");
    13959 
    13960  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13961 
    13962  VkResult result = allocator->AllocateMemory(
    13963  *pVkMemoryRequirements,
    13964  false, // requiresDedicatedAllocation
    13965  false, // prefersDedicatedAllocation
    13966  VK_NULL_HANDLE, // dedicatedBuffer
    13967  VK_NULL_HANDLE, // dedicatedImage
    13968  *pCreateInfo,
    13969  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13970  pAllocation);
    13971 
    13972 #if VMA_RECORDING_ENABLED
    13973  if(allocator->GetRecorder() != VMA_NULL)
    13974  {
    13975  allocator->GetRecorder()->RecordAllocateMemory(
    13976  allocator->GetCurrentFrameIndex(),
    13977  *pVkMemoryRequirements,
    13978  *pCreateInfo,
    13979  *pAllocation);
    13980  }
    13981 #endif
    13982 
    13983  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13984  {
    13985  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13986  }
    13987 
    13988  return result;
    13989 }
    13990 
    13992  VmaAllocator allocator,
    13993  VkBuffer buffer,
    13994  const VmaAllocationCreateInfo* pCreateInfo,
    13995  VmaAllocation* pAllocation,
    13996  VmaAllocationInfo* pAllocationInfo)
    13997 {
    13998  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13999 
    14000  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    14001 
    14002  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14003 
    14004  VkMemoryRequirements vkMemReq = {};
    14005  bool requiresDedicatedAllocation = false;
    14006  bool prefersDedicatedAllocation = false;
    14007  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    14008  requiresDedicatedAllocation,
    14009  prefersDedicatedAllocation);
    14010 
    14011  VkResult result = allocator->AllocateMemory(
    14012  vkMemReq,
    14013  requiresDedicatedAllocation,
    14014  prefersDedicatedAllocation,
    14015  buffer, // dedicatedBuffer
    14016  VK_NULL_HANDLE, // dedicatedImage
    14017  *pCreateInfo,
    14018  VMA_SUBALLOCATION_TYPE_BUFFER,
    14019  pAllocation);
    14020 
    14021 #if VMA_RECORDING_ENABLED
    14022  if(allocator->GetRecorder() != VMA_NULL)
    14023  {
    14024  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    14025  allocator->GetCurrentFrameIndex(),
    14026  vkMemReq,
    14027  requiresDedicatedAllocation,
    14028  prefersDedicatedAllocation,
    14029  *pCreateInfo,
    14030  *pAllocation);
    14031  }
    14032 #endif
    14033 
    14034  if(pAllocationInfo && result == VK_SUCCESS)
    14035  {
    14036  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14037  }
    14038 
    14039  return result;
    14040 }
    14041 
    14042 VkResult vmaAllocateMemoryForImage(
    14043  VmaAllocator allocator,
    14044  VkImage image,
    14045  const VmaAllocationCreateInfo* pCreateInfo,
    14046  VmaAllocation* pAllocation,
    14047  VmaAllocationInfo* pAllocationInfo)
    14048 {
    14049  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    14050 
    14051  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    14052 
    14053  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14054 
    14055  VkMemoryRequirements vkMemReq = {};
    14056  bool requiresDedicatedAllocation = false;
    14057  bool prefersDedicatedAllocation = false;
    14058  allocator->GetImageMemoryRequirements(image, vkMemReq,
    14059  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14060 
    14061  VkResult result = allocator->AllocateMemory(
    14062  vkMemReq,
    14063  requiresDedicatedAllocation,
    14064  prefersDedicatedAllocation,
    14065  VK_NULL_HANDLE, // dedicatedBuffer
    14066  image, // dedicatedImage
    14067  *pCreateInfo,
    14068  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    14069  pAllocation);
    14070 
    14071 #if VMA_RECORDING_ENABLED
    14072  if(allocator->GetRecorder() != VMA_NULL)
    14073  {
    14074  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    14075  allocator->GetCurrentFrameIndex(),
    14076  vkMemReq,
    14077  requiresDedicatedAllocation,
    14078  prefersDedicatedAllocation,
    14079  *pCreateInfo,
    14080  *pAllocation);
    14081  }
    14082 #endif
    14083 
    14084  if(pAllocationInfo && result == VK_SUCCESS)
    14085  {
    14086  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14087  }
    14088 
    14089  return result;
    14090 }
    14091 
    14092 void vmaFreeMemory(
    14093  VmaAllocator allocator,
    14094  VmaAllocation allocation)
    14095 {
    14096  VMA_ASSERT(allocator);
    14097 
    14098  if(allocation == VK_NULL_HANDLE)
    14099  {
    14100  return;
    14101  }
    14102 
    14103  VMA_DEBUG_LOG("vmaFreeMemory");
    14104 
    14105  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14106 
    14107 #if VMA_RECORDING_ENABLED
    14108  if(allocator->GetRecorder() != VMA_NULL)
    14109  {
    14110  allocator->GetRecorder()->RecordFreeMemory(
    14111  allocator->GetCurrentFrameIndex(),
    14112  allocation);
    14113  }
    14114 #endif
    14115 
    14116  allocator->FreeMemory(allocation);
    14117 }
    14118 
    14119 VkResult vmaResizeAllocation(
    14120  VmaAllocator allocator,
    14121  VmaAllocation allocation,
    14122  VkDeviceSize newSize)
    14123 {
    14124  VMA_ASSERT(allocator && allocation);
    14125 
    14126  VMA_DEBUG_LOG("vmaResizeAllocation");
    14127 
    14128  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14129 
    14130 #if VMA_RECORDING_ENABLED
    14131  if(allocator->GetRecorder() != VMA_NULL)
    14132  {
    14133  allocator->GetRecorder()->RecordResizeAllocation(
    14134  allocator->GetCurrentFrameIndex(),
    14135  allocation,
    14136  newSize);
    14137  }
    14138 #endif
    14139 
    14140  return allocator->ResizeAllocation(allocation, newSize);
    14141 }
    14142 
    14144  VmaAllocator allocator,
    14145  VmaAllocation allocation,
    14146  VmaAllocationInfo* pAllocationInfo)
    14147 {
    14148  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    14149 
    14150  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14151 
    14152 #if VMA_RECORDING_ENABLED
    14153  if(allocator->GetRecorder() != VMA_NULL)
    14154  {
    14155  allocator->GetRecorder()->RecordGetAllocationInfo(
    14156  allocator->GetCurrentFrameIndex(),
    14157  allocation);
    14158  }
    14159 #endif
    14160 
    14161  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    14162 }
    14163 
    14164 VkBool32 vmaTouchAllocation(
    14165  VmaAllocator allocator,
    14166  VmaAllocation allocation)
    14167 {
    14168  VMA_ASSERT(allocator && allocation);
    14169 
    14170  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14171 
    14172 #if VMA_RECORDING_ENABLED
    14173  if(allocator->GetRecorder() != VMA_NULL)
    14174  {
    14175  allocator->GetRecorder()->RecordTouchAllocation(
    14176  allocator->GetCurrentFrameIndex(),
    14177  allocation);
    14178  }
    14179 #endif
    14180 
    14181  return allocator->TouchAllocation(allocation);
    14182 }
    14183 
    14185  VmaAllocator allocator,
    14186  VmaAllocation allocation,
    14187  void* pUserData)
    14188 {
    14189  VMA_ASSERT(allocator && allocation);
    14190 
    14191  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14192 
    14193  allocation->SetUserData(allocator, pUserData);
    14194 
    14195 #if VMA_RECORDING_ENABLED
    14196  if(allocator->GetRecorder() != VMA_NULL)
    14197  {
    14198  allocator->GetRecorder()->RecordSetAllocationUserData(
    14199  allocator->GetCurrentFrameIndex(),
    14200  allocation,
    14201  pUserData);
    14202  }
    14203 #endif
    14204 }
    14205 
    14207  VmaAllocator allocator,
    14208  VmaAllocation* pAllocation)
    14209 {
    14210  VMA_ASSERT(allocator && pAllocation);
    14211 
    14212  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    14213 
    14214  allocator->CreateLostAllocation(pAllocation);
    14215 
    14216 #if VMA_RECORDING_ENABLED
    14217  if(allocator->GetRecorder() != VMA_NULL)
    14218  {
    14219  allocator->GetRecorder()->RecordCreateLostAllocation(
    14220  allocator->GetCurrentFrameIndex(),
    14221  *pAllocation);
    14222  }
    14223 #endif
    14224 }
    14225 
    14226 VkResult vmaMapMemory(
    14227  VmaAllocator allocator,
    14228  VmaAllocation allocation,
    14229  void** ppData)
    14230 {
    14231  VMA_ASSERT(allocator && allocation && ppData);
    14232 
    14233  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14234 
    14235  VkResult res = allocator->Map(allocation, ppData);
    14236 
    14237 #if VMA_RECORDING_ENABLED
    14238  if(allocator->GetRecorder() != VMA_NULL)
    14239  {
    14240  allocator->GetRecorder()->RecordMapMemory(
    14241  allocator->GetCurrentFrameIndex(),
    14242  allocation);
    14243  }
    14244 #endif
    14245 
    14246  return res;
    14247 }
    14248 
    14249 void vmaUnmapMemory(
    14250  VmaAllocator allocator,
    14251  VmaAllocation allocation)
    14252 {
    14253  VMA_ASSERT(allocator && allocation);
    14254 
    14255  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14256 
    14257 #if VMA_RECORDING_ENABLED
    14258  if(allocator->GetRecorder() != VMA_NULL)
    14259  {
    14260  allocator->GetRecorder()->RecordUnmapMemory(
    14261  allocator->GetCurrentFrameIndex(),
    14262  allocation);
    14263  }
    14264 #endif
    14265 
    14266  allocator->Unmap(allocation);
    14267 }
    14268 
    14269 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14270 {
    14271  VMA_ASSERT(allocator && allocation);
    14272 
    14273  VMA_DEBUG_LOG("vmaFlushAllocation");
    14274 
    14275  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14276 
    14277  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14278 
    14279 #if VMA_RECORDING_ENABLED
    14280  if(allocator->GetRecorder() != VMA_NULL)
    14281  {
    14282  allocator->GetRecorder()->RecordFlushAllocation(
    14283  allocator->GetCurrentFrameIndex(),
    14284  allocation, offset, size);
    14285  }
    14286 #endif
    14287 }
    14288 
    14289 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14290 {
    14291  VMA_ASSERT(allocator && allocation);
    14292 
    14293  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14294 
    14295  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14296 
    14297  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14298 
    14299 #if VMA_RECORDING_ENABLED
    14300  if(allocator->GetRecorder() != VMA_NULL)
    14301  {
    14302  allocator->GetRecorder()->RecordInvalidateAllocation(
    14303  allocator->GetCurrentFrameIndex(),
    14304  allocation, offset, size);
    14305  }
    14306 #endif
    14307 }
    14308 
    14309 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14310 {
    14311  VMA_ASSERT(allocator);
    14312 
    14313  VMA_DEBUG_LOG("vmaCheckCorruption");
    14314 
    14315  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14316 
    14317  return allocator->CheckCorruption(memoryTypeBits);
    14318 }
    14319 
    14320 VkResult vmaDefragment(
    14321  VmaAllocator allocator,
    14322  VmaAllocation* pAllocations,
    14323  size_t allocationCount,
    14324  VkBool32* pAllocationsChanged,
    14325  const VmaDefragmentationInfo *pDefragmentationInfo,
    14326  VmaDefragmentationStats* pDefragmentationStats)
    14327 {
    14328  VMA_ASSERT(allocator && pAllocations);
    14329 
    14330  VMA_DEBUG_LOG("vmaDefragment");
    14331 
    14332  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14333 
    14334  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14335 }
    14336 
    14337 VkResult vmaBindBufferMemory(
    14338  VmaAllocator allocator,
    14339  VmaAllocation allocation,
    14340  VkBuffer buffer)
    14341 {
    14342  VMA_ASSERT(allocator && allocation && buffer);
    14343 
    14344  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14345 
    14346  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14347 
    14348  return allocator->BindBufferMemory(allocation, buffer);
    14349 }
    14350 
    14351 VkResult vmaBindImageMemory(
    14352  VmaAllocator allocator,
    14353  VmaAllocation allocation,
    14354  VkImage image)
    14355 {
    14356  VMA_ASSERT(allocator && allocation && image);
    14357 
    14358  VMA_DEBUG_LOG("vmaBindImageMemory");
    14359 
    14360  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14361 
    14362  return allocator->BindImageMemory(allocation, image);
    14363 }
    14364 
    14365 VkResult vmaCreateBuffer(
    14366  VmaAllocator allocator,
    14367  const VkBufferCreateInfo* pBufferCreateInfo,
    14368  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14369  VkBuffer* pBuffer,
    14370  VmaAllocation* pAllocation,
    14371  VmaAllocationInfo* pAllocationInfo)
    14372 {
    14373  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14374 
    14375  if(pBufferCreateInfo->size == 0)
    14376  {
    14377  return VK_ERROR_VALIDATION_FAILED_EXT;
    14378  }
    14379 
    14380  VMA_DEBUG_LOG("vmaCreateBuffer");
    14381 
    14382  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14383 
    14384  *pBuffer = VK_NULL_HANDLE;
    14385  *pAllocation = VK_NULL_HANDLE;
    14386 
    14387  // 1. Create VkBuffer.
    14388  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14389  allocator->m_hDevice,
    14390  pBufferCreateInfo,
    14391  allocator->GetAllocationCallbacks(),
    14392  pBuffer);
    14393  if(res >= 0)
    14394  {
    14395  // 2. vkGetBufferMemoryRequirements.
    14396  VkMemoryRequirements vkMemReq = {};
    14397  bool requiresDedicatedAllocation = false;
    14398  bool prefersDedicatedAllocation = false;
    14399  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14400  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14401 
    14402  // Make sure alignment requirements for specific buffer usages reported
    14403  // in Physical Device Properties are included in alignment reported by memory requirements.
    14404  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14405  {
    14406  VMA_ASSERT(vkMemReq.alignment %
    14407  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14408  }
    14409  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14410  {
    14411  VMA_ASSERT(vkMemReq.alignment %
    14412  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14413  }
    14414  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14415  {
    14416  VMA_ASSERT(vkMemReq.alignment %
    14417  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14418  }
    14419 
    14420  // 3. Allocate memory using allocator.
    14421  res = allocator->AllocateMemory(
    14422  vkMemReq,
    14423  requiresDedicatedAllocation,
    14424  prefersDedicatedAllocation,
    14425  *pBuffer, // dedicatedBuffer
    14426  VK_NULL_HANDLE, // dedicatedImage
    14427  *pAllocationCreateInfo,
    14428  VMA_SUBALLOCATION_TYPE_BUFFER,
    14429  pAllocation);
    14430 
    14431 #if VMA_RECORDING_ENABLED
    14432  if(allocator->GetRecorder() != VMA_NULL)
    14433  {
    14434  allocator->GetRecorder()->RecordCreateBuffer(
    14435  allocator->GetCurrentFrameIndex(),
    14436  *pBufferCreateInfo,
    14437  *pAllocationCreateInfo,
    14438  *pAllocation);
    14439  }
    14440 #endif
    14441 
    14442  if(res >= 0)
    14443  {
    14444  // 3. Bind buffer with memory.
    14445  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14446  if(res >= 0)
    14447  {
    14448  // All steps succeeded.
    14449  #if VMA_STATS_STRING_ENABLED
    14450  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14451  #endif
    14452  if(pAllocationInfo != VMA_NULL)
    14453  {
    14454  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14455  }
    14456 
    14457  return VK_SUCCESS;
    14458  }
    14459  allocator->FreeMemory(*pAllocation);
    14460  *pAllocation = VK_NULL_HANDLE;
    14461  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14462  *pBuffer = VK_NULL_HANDLE;
    14463  return res;
    14464  }
    14465  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14466  *pBuffer = VK_NULL_HANDLE;
    14467  return res;
    14468  }
    14469  return res;
    14470 }
    14471 
    14472 void vmaDestroyBuffer(
    14473  VmaAllocator allocator,
    14474  VkBuffer buffer,
    14475  VmaAllocation allocation)
    14476 {
    14477  VMA_ASSERT(allocator);
    14478 
    14479  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14480  {
    14481  return;
    14482  }
    14483 
    14484  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14485 
    14486  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14487 
    14488 #if VMA_RECORDING_ENABLED
    14489  if(allocator->GetRecorder() != VMA_NULL)
    14490  {
    14491  allocator->GetRecorder()->RecordDestroyBuffer(
    14492  allocator->GetCurrentFrameIndex(),
    14493  allocation);
    14494  }
    14495 #endif
    14496 
    14497  if(buffer != VK_NULL_HANDLE)
    14498  {
    14499  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14500  }
    14501 
    14502  if(allocation != VK_NULL_HANDLE)
    14503  {
    14504  allocator->FreeMemory(allocation);
    14505  }
    14506 }
    14507 
    14508 VkResult vmaCreateImage(
    14509  VmaAllocator allocator,
    14510  const VkImageCreateInfo* pImageCreateInfo,
    14511  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14512  VkImage* pImage,
    14513  VmaAllocation* pAllocation,
    14514  VmaAllocationInfo* pAllocationInfo)
    14515 {
    14516  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14517 
    14518  if(pImageCreateInfo->extent.width == 0 ||
    14519  pImageCreateInfo->extent.height == 0 ||
    14520  pImageCreateInfo->extent.depth == 0 ||
    14521  pImageCreateInfo->mipLevels == 0 ||
    14522  pImageCreateInfo->arrayLayers == 0)
    14523  {
    14524  return VK_ERROR_VALIDATION_FAILED_EXT;
    14525  }
    14526 
    14527  VMA_DEBUG_LOG("vmaCreateImage");
    14528 
    14529  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14530 
    14531  *pImage = VK_NULL_HANDLE;
    14532  *pAllocation = VK_NULL_HANDLE;
    14533 
    14534  // 1. Create VkImage.
    14535  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14536  allocator->m_hDevice,
    14537  pImageCreateInfo,
    14538  allocator->GetAllocationCallbacks(),
    14539  pImage);
    14540  if(res >= 0)
    14541  {
    14542  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14543  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14544  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14545 
    14546  // 2. Allocate memory using allocator.
    14547  VkMemoryRequirements vkMemReq = {};
    14548  bool requiresDedicatedAllocation = false;
    14549  bool prefersDedicatedAllocation = false;
    14550  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14551  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14552 
    14553  res = allocator->AllocateMemory(
    14554  vkMemReq,
    14555  requiresDedicatedAllocation,
    14556  prefersDedicatedAllocation,
    14557  VK_NULL_HANDLE, // dedicatedBuffer
    14558  *pImage, // dedicatedImage
    14559  *pAllocationCreateInfo,
    14560  suballocType,
    14561  pAllocation);
    14562 
    14563 #if VMA_RECORDING_ENABLED
    14564  if(allocator->GetRecorder() != VMA_NULL)
    14565  {
    14566  allocator->GetRecorder()->RecordCreateImage(
    14567  allocator->GetCurrentFrameIndex(),
    14568  *pImageCreateInfo,
    14569  *pAllocationCreateInfo,
    14570  *pAllocation);
    14571  }
    14572 #endif
    14573 
    14574  if(res >= 0)
    14575  {
    14576  // 3. Bind image with memory.
    14577  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14578  if(res >= 0)
    14579  {
    14580  // All steps succeeded.
    14581  #if VMA_STATS_STRING_ENABLED
    14582  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14583  #endif
    14584  if(pAllocationInfo != VMA_NULL)
    14585  {
    14586  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14587  }
    14588 
    14589  return VK_SUCCESS;
    14590  }
    14591  allocator->FreeMemory(*pAllocation);
    14592  *pAllocation = VK_NULL_HANDLE;
    14593  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14594  *pImage = VK_NULL_HANDLE;
    14595  return res;
    14596  }
    14597  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14598  *pImage = VK_NULL_HANDLE;
    14599  return res;
    14600  }
    14601  return res;
    14602 }
    14603 
    14604 void vmaDestroyImage(
    14605  VmaAllocator allocator,
    14606  VkImage image,
    14607  VmaAllocation allocation)
    14608 {
    14609  VMA_ASSERT(allocator);
    14610 
    14611  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14612  {
    14613  return;
    14614  }
    14615 
    14616  VMA_DEBUG_LOG("vmaDestroyImage");
    14617 
    14618  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14619 
    14620 #if VMA_RECORDING_ENABLED
    14621  if(allocator->GetRecorder() != VMA_NULL)
    14622  {
    14623  allocator->GetRecorder()->RecordDestroyImage(
    14624  allocator->GetCurrentFrameIndex(),
    14625  allocation);
    14626  }
    14627 #endif
    14628 
    14629  if(image != VK_NULL_HANDLE)
    14630  {
    14631  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14632  }
    14633  if(allocation != VK_NULL_HANDLE)
    14634  {
    14635  allocator->FreeMemory(allocation);
    14636  }
    14637 }
    14638 
    14639 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1586
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1888
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1643
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Compacts memory by moving allocations.
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    Definition: vk_mem_alloc.h:1617
    -
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2212
    +
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2213
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1598
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Definition: vk_mem_alloc.h:1844
    -
    Definition: vk_mem_alloc.h:1947
    +
    Definition: vk_mem_alloc.h:1845
    +
    Definition: vk_mem_alloc.h:1948
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1590
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2312
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2313
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1640
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2582
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2101
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2583
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2102
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1487
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2193
    -
    Definition: vk_mem_alloc.h:1924
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2194
    +
    Definition: vk_mem_alloc.h:1925
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1579
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2000
    -
    Definition: vk_mem_alloc.h:1871
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2001
    +
    Definition: vk_mem_alloc.h:1872
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1652
    -
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2129
    +
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2130
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1705
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1706
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1637
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1875
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1876
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation and atomically marks it as used in current fra...
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1777
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1778
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1595
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1776
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2586
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1777
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2587
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1669
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1786
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2594
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1984
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2577
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1787
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2595
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1985
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2578
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1596
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1521
    Represents main object of this library initialized.
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1646
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2143
    -
    Definition: vk_mem_alloc.h:2137
    -
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1712
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2322
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2144
    +
    Definition: vk_mem_alloc.h:2138
    +
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1713
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2323
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1591
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1615
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2021
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2163
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2199
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2022
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2164
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2200
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1577
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2146
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2147
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1822
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1823
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2572
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2573
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2590
    -
    Definition: vk_mem_alloc.h:1861
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2008
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2591
    +
    Definition: vk_mem_alloc.h:1862
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2009
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1594
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Optional configuration parameters to be passed to function vmaDefragment().
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1782
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1783
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1527
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    - +
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1548
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1619
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1553
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2592
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2593
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1995
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2209
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1996
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2210
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1587
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1765
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2158
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1766
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2159
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1540
    -
    Definition: vk_mem_alloc.h:2133
    +
    Definition: vk_mem_alloc.h:2134
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1931
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1778
    +
    Definition: vk_mem_alloc.h:1932
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1779
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1544
    -
    Definition: vk_mem_alloc.h:1958
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2149
    -
    Definition: vk_mem_alloc.h:1870
    +
    Definition: vk_mem_alloc.h:1959
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2150
    +
    Definition: vk_mem_alloc.h:1871
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1593
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1990
    -
    Definition: vk_mem_alloc.h:1981
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1991
    +
    Definition: vk_mem_alloc.h:1982
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1768
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1769
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1589
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2171
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2172
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1655
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2202
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1979
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2014
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2203
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1980
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2015
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1693
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1784
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1911
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1777
    +
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1694
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1785
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1912
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1778
    VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1600
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1625
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1542
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1599
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2185
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2186
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1592
    -
    Definition: vk_mem_alloc.h:1942
    +
    Definition: vk_mem_alloc.h:1943
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
    Description of a Allocator to be created.
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1633
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2336
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2337
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1649
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1777
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1774
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1778
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1775
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2190
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2191
    VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    Checks magic number in margins around all allocations in given memory pool in search for corruptions...
    -
    Definition: vk_mem_alloc.h:1951
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2317
    -
    Definition: vk_mem_alloc.h:1965
    -
    Definition: vk_mem_alloc.h:1977
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2588
    +
    Definition: vk_mem_alloc.h:1952
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2318
    +
    Definition: vk_mem_alloc.h:1966
    +
    Definition: vk_mem_alloc.h:1978
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2589
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1585
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1772
    -
    Definition: vk_mem_alloc.h:1827
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2139
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1773
    +
    Definition: vk_mem_alloc.h:1828
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2140
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1622
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1770
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1771
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1597
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1601
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1898
    -
    Definition: vk_mem_alloc.h:1972
    -
    Definition: vk_mem_alloc.h:1854
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2331
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1899
    +
    Definition: vk_mem_alloc.h:1973
    +
    Definition: vk_mem_alloc.h:1855
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2332
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1575
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1588
    -
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2118
    +
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2119
    VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
    Tries to resize an allocation in place, if there is enough free memory after it.
    -
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2298
    +
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2299
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1962
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2083
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1778
    +
    Definition: vk_mem_alloc.h:1963
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2084
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1779
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
    Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
    - +
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1609
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1785
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1786
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2196
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1778
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2197
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1779
    struct VmaRecordSettings VmaRecordSettings
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2303
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2304