From 99f2d3543c2b9f0c0c3edbeabf5566c83730fb47 Mon Sep 17 00:00:00 2001
From: Adam Sawicki
Date: Fri, 16 Nov 2018 13:48:11 +0100
Subject: [PATCH] Minor update in documentation.
---
 docs/html/vk__mem__alloc_8h.html        |   3 +-
 docs/html/vk__mem__alloc_8h_source.html | 104 ++++++++++++------------
 src/vk_mem_alloc.h                      |   3 +
 3 files changed, 57 insertions(+), 53 deletions(-)

diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html
index 5d84dc1..4a1946b 100644
--- a/docs/html/vk__mem__alloc_8h.html
+++ b/docs/html/vk__mem__alloc_8h.html
@@ -825,7 +825,8 @@ Functions
 VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT

Allocation strategy that chooses biggest possible free range for the allocation.

-VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT 

Allocation strategy that chooses first suitable free range for the allocation.

+VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT 

Allocation strategy that chooses first suitable free range for the allocation.

+

"First" doesn't necessarily means the one with smallest offset in memory, but rather the one that is easiest and fastest to find.

VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT 

Allocation strategy that tries to minimize memory usage.
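These strategy bits go into VmaAllocationCreateInfo::flags. A minimal usage sketch, assuming an existing VmaAllocator allocator and a filled VkBufferCreateInfo bufCreateInfo (both placeholders):

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
// Prefer the fastest-to-find free range over the tightest fit.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT;

VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);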

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index cc767f1..96303b9 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -65,7 +65,7 @@ $(function() {
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1479 /*
1480 Define this macro to 0/1 to disable/enable support for recording functionality,
1481 available through VmaAllocatorCreateInfo::pRecordSettings.
1482 */
1483 #ifndef VMA_RECORDING_ENABLED
1484  #ifdef _WIN32
1485  #define VMA_RECORDING_ENABLED 1
1486  #else
1487  #define VMA_RECORDING_ENABLED 0
1488  #endif
1489 #endif
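A short sketch of overriding this default, assuming recording is not wanted even on Windows (define the macro before including the header):

#define VMA_RECORDING_ENABLED 0
#include "vk_mem_alloc.h"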
1490 
1491 #ifndef NOMINMAX
1492  #define NOMINMAX // For windows.h
1493 #endif
1494 
1495 #include <vulkan/vulkan.h>
1496 
1497 #if VMA_RECORDING_ENABLED
1498  #include <windows.h>
1499 #endif
1500 
1501 #if !defined(VMA_DEDICATED_ALLOCATION)
1502  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1503  #define VMA_DEDICATED_ALLOCATION 1
1504  #else
1505  #define VMA_DEDICATED_ALLOCATION 0
1506  #endif
1507 #endif
1508 
1518 VK_DEFINE_HANDLE(VmaAllocator)
1519 
1520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1522  VmaAllocator allocator,
1523  uint32_t memoryType,
1524  VkDeviceMemory memory,
1525  VkDeviceSize size);
1527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1528  VmaAllocator allocator,
1529  uint32_t memoryType,
1530  VkDeviceMemory memory,
1531  VkDeviceSize size);
1532 
1546 
1576 
1579 typedef VkFlags VmaAllocatorCreateFlags;
1580 
1585 typedef struct VmaVulkanFunctions {
1586  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1587  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1588  PFN_vkAllocateMemory vkAllocateMemory;
1589  PFN_vkFreeMemory vkFreeMemory;
1590  PFN_vkMapMemory vkMapMemory;
1591  PFN_vkUnmapMemory vkUnmapMemory;
1592  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1593  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1594  PFN_vkBindBufferMemory vkBindBufferMemory;
1595  PFN_vkBindImageMemory vkBindImageMemory;
1596  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1597  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1598  PFN_vkCreateBuffer vkCreateBuffer;
1599  PFN_vkDestroyBuffer vkDestroyBuffer;
1600  PFN_vkCreateImage vkCreateImage;
1601  PFN_vkDestroyImage vkDestroyImage;
1602 #if VMA_DEDICATED_ALLOCATION
1603  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1604  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1605 #endif
1607 
1609 typedef enum VmaRecordFlagBits {
1616 
1619 typedef VkFlags VmaRecordFlags;
1620 
1622 typedef struct VmaRecordSettings
1623 {
1633  const char* pFilePath;
1635 
1637 typedef struct VmaAllocatorCreateInfo
1638 {
1642 
1643  VkPhysicalDevice physicalDevice;
1645 
1646  VkDevice device;
1648 
1651 
1652  const VkAllocationCallbacks* pAllocationCallbacks;
1654 
1693  const VkDeviceSize* pHeapSizeLimit;
1714 
1716 VkResult vmaCreateAllocator(
1717  const VmaAllocatorCreateInfo* pCreateInfo,
1718  VmaAllocator* pAllocator);
1719 
1721 void vmaDestroyAllocator(
1722  VmaAllocator allocator);
1723 
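A minimal creation/destruction sketch, assuming physicalDevice and device were obtained through standard Vulkan initialization:

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator for the lifetime of the device ...
vmaDestroyAllocator(allocator); // after all buffers/images allocated from it are gone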
1728 void vmaGetPhysicalDeviceProperties(
1729  VmaAllocator allocator,
1730  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1731 
1736 void vmaGetMemoryProperties(
1737  VmaAllocator allocator,
1738  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1739 
1746 void vmaGetMemoryTypeProperties(
1747  VmaAllocator allocator,
1748  uint32_t memoryTypeIndex,
1749  VkMemoryPropertyFlags* pFlags);
1750 
1759 void vmaSetCurrentFrameIndex(
1760  VmaAllocator allocator,
1761  uint32_t frameIndex);
1762 
1765 typedef struct VmaStatInfo
1766 {
1768  uint32_t blockCount;
1774  VkDeviceSize usedBytes;
1776  VkDeviceSize unusedBytes;
1779 } VmaStatInfo;
1780 
1782 typedef struct VmaStats
1783 {
1784  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1785  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1787 } VmaStats;
1788 
1790 void vmaCalculateStats(
1791  VmaAllocator allocator,
1792  VmaStats* pStats);
1793 
1794 #define VMA_STATS_STRING_ENABLED 1
1795 
1796 #if VMA_STATS_STRING_ENABLED
1797 
1799 
1801 void vmaBuildStatsString(
1802  VmaAllocator allocator,
1803  char** ppStatsString,
1804  VkBool32 detailedMap);
1805 
1806 void vmaFreeStatsString(
1807  VmaAllocator allocator,
1808  char* pStatsString);
1809 
1810 #endif // #if VMA_STATS_STRING_ENABLED
1811 
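A usage sketch for the statistics string, assuming a valid allocator and <cstdio> for output; the returned string is a JSON report and must be released with vmaFreeStatsString:

char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);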
1820 VK_DEFINE_HANDLE(VmaPool)
1821 
1822 typedef enum VmaMemoryUsage
1823 {
1872 } VmaMemoryUsage;
1873 
1888 
1943 
1956 
1966 
1973 
1977 
1978 typedef struct VmaAllocationCreateInfo
1979 {
1992  VkMemoryPropertyFlags requiredFlags;
1997  VkMemoryPropertyFlags preferredFlags;
2005  uint32_t memoryTypeBits;
2018  void* pUserData;
2020 
2037 VkResult vmaFindMemoryTypeIndex(
2038  VmaAllocator allocator,
2039  uint32_t memoryTypeBits,
2040  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2041  uint32_t* pMemoryTypeIndex);
2042 
2055 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2056  VmaAllocator allocator,
2057  const VkBufferCreateInfo* pBufferCreateInfo,
2058  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2059  uint32_t* pMemoryTypeIndex);
2060 
2073 VkResult vmaFindMemoryTypeIndexForImageInfo(
2074  VmaAllocator allocator,
2075  const VkImageCreateInfo* pImageCreateInfo,
2076  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2077  uint32_t* pMemoryTypeIndex);
2078 
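A sketch of querying a memory type for host-visible staging memory; passing UINT32_MAX as memoryTypeBits means no restriction (allocator is assumed to exist):

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);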
2099 
2116 
2127 
2133 
2136 typedef VkFlags VmaPoolCreateFlags;
2137 
2140 typedef struct VmaPoolCreateInfo {
2155  VkDeviceSize blockSize;
2184 
2187 typedef struct VmaPoolStats {
2190  VkDeviceSize size;
2193  VkDeviceSize unusedSize;
2206  VkDeviceSize unusedRangeSizeMax;
2209  size_t blockCount;
2210 } VmaPoolStats;
2211 
2218 VkResult vmaCreatePool(
2219  VmaAllocator allocator,
2220  const VmaPoolCreateInfo* pCreateInfo,
2221  VmaPool* pPool);
2222 
2225 void vmaDestroyPool(
2226  VmaAllocator allocator,
2227  VmaPool pool);
2228 
2235 void vmaGetPoolStats(
2236  VmaAllocator allocator,
2237  VmaPool pool,
2238  VmaPoolStats* pPoolStats);
2239 
2246 void vmaMakePoolAllocationsLost(
2247  VmaAllocator allocator,
2248  VmaPool pool,
2249  size_t* pLostAllocationCount);
2250 
2265 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2266 
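A custom-pool sketch building on the memTypeIndex found above; the 64 MiB block size is an arbitrary assumption:

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 64ull * 1024 * 1024;

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

VmaPoolStats poolStats = {};
vmaGetPoolStats(allocator, pool, &poolStats);
// Allocations are routed into the pool via VmaAllocationCreateInfo::pool.
vmaDestroyPool(allocator, pool);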
2291 VK_DEFINE_HANDLE(VmaAllocation)
2292 
2293 
2295 typedef struct VmaAllocationInfo {
2300  uint32_t memoryType;
2309  VkDeviceMemory deviceMemory;
2314  VkDeviceSize offset;
2319  VkDeviceSize size;
2333  void* pUserData;
2335 
2346 VkResult vmaAllocateMemory(
2347  VmaAllocator allocator,
2348  const VkMemoryRequirements* pVkMemoryRequirements,
2349  const VmaAllocationCreateInfo* pCreateInfo,
2350  VmaAllocation* pAllocation,
2351  VmaAllocationInfo* pAllocationInfo);
2352 
2359 VkResult vmaAllocateMemoryForBuffer(
2360  VmaAllocator allocator,
2361  VkBuffer buffer,
2362  const VmaAllocationCreateInfo* pCreateInfo,
2363  VmaAllocation* pAllocation,
2364  VmaAllocationInfo* pAllocationInfo);
2365 
2367 VkResult vmaAllocateMemoryForImage(
2368  VmaAllocator allocator,
2369  VkImage image,
2370  const VmaAllocationCreateInfo* pCreateInfo,
2371  VmaAllocation* pAllocation,
2372  VmaAllocationInfo* pAllocationInfo);
2373 
2375 void vmaFreeMemory(
2376  VmaAllocator allocator,
2377  VmaAllocation allocation);
2378 
2399 VkResult vmaResizeAllocation(
2400  VmaAllocator allocator,
2401  VmaAllocation allocation,
2402  VkDeviceSize newSize);
2403 
2420 void vmaGetAllocationInfo(
2421  VmaAllocator allocator,
2422  VmaAllocation allocation,
2423  VmaAllocationInfo* pAllocationInfo);
2424 
2439 VkBool32 vmaTouchAllocation(
2440  VmaAllocator allocator,
2441  VmaAllocation allocation);
2442 
2456 void vmaSetAllocationUserData(
2457  VmaAllocator allocator,
2458  VmaAllocation allocation,
2459  void* pUserData);
2460 
2471 void vmaCreateLostAllocation(
2472  VmaAllocator allocator,
2473  VmaAllocation* pAllocation);
2474 
2509 VkResult vmaMapMemory(
2510  VmaAllocator allocator,
2511  VmaAllocation allocation,
2512  void** ppData);
2513 
2518 void vmaUnmapMemory(
2519  VmaAllocator allocator,
2520  VmaAllocation allocation);
2521 
2534 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2535 
2548 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2549 
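A map/write/flush sketch, assuming a host-visible allocation, a source buffer data of dataSize bytes, and <cstring> for memcpy; the flush matters only when the memory type is not HOST_COHERENT:

void* mapped = nullptr;
if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, data, dataSize);
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, allocation);
}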
2566 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2567 
2569 typedef struct VmaDefragmentationInfo {
2574  VkDeviceSize maxBytesToMove;
2581 
2583 typedef struct VmaDefragmentationStats {
2585  VkDeviceSize bytesMoved;
2587  VkDeviceSize bytesFreed;
2593 
2632 VkResult vmaDefragment(
2633  VmaAllocator allocator,
2634  VmaAllocation* pAllocations,
2635  size_t allocationCount,
2636  VkBool32* pAllocationsChanged,
2637  const VmaDefragmentationInfo *pDefragmentationInfo,
2638  VmaDefragmentationStats* pDefragmentationStats);
2639 
2652 VkResult vmaBindBufferMemory(
2653  VmaAllocator allocator,
2654  VmaAllocation allocation,
2655  VkBuffer buffer);
2656 
2669 VkResult vmaBindImageMemory(
2670  VmaAllocator allocator,
2671  VmaAllocation allocation,
2672  VkImage image);
2673 
2700 VkResult vmaCreateBuffer(
2701  VmaAllocator allocator,
2702  const VkBufferCreateInfo* pBufferCreateInfo,
2703  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2704  VkBuffer* pBuffer,
2705  VmaAllocation* pAllocation,
2706  VmaAllocationInfo* pAllocationInfo);
2707 
2719 void vmaDestroyBuffer(
2720  VmaAllocator allocator,
2721  VkBuffer buffer,
2722  VmaAllocation allocation);
2723 
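A combined create/destroy sketch; the buffer size and usage flags are example values:

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
// ...
vmaDestroyBuffer(allocator, buffer, allocation); // destroys the buffer and frees its memory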
2725 VkResult vmaCreateImage(
2726  VmaAllocator allocator,
2727  const VkImageCreateInfo* pImageCreateInfo,
2728  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2729  VkImage* pImage,
2730  VmaAllocation* pAllocation,
2731  VmaAllocationInfo* pAllocationInfo);
2732 
2744 void vmaDestroyImage(
2745  VmaAllocator allocator,
2746  VkImage image,
2747  VmaAllocation allocation);
2748 
2749 #ifdef __cplusplus
2750 }
2751 #endif
2752 
2753 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2754 
2755 // For Visual Studio IntelliSense.
2756 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2757 #define VMA_IMPLEMENTATION
2758 #endif
2759 
2760 #ifdef VMA_IMPLEMENTATION
2761 #undef VMA_IMPLEMENTATION
2762 
2763 #include <cstdint>
2764 #include <cstdlib>
2765 #include <cstring>
2766 
2767 /*******************************************************************************
2768 CONFIGURATION SECTION
2769 
2770 Define some of these macros before each #include of this header or change them
2771 here if you need behavior other than the default, depending on your environment.
2772 */
2773 
2774 /*
2775 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2776 internally, like:
2777 
2778  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2779 
2780 Define to 0 if you are going to provide your own pointers to Vulkan functions via
2781 VmaAllocatorCreateInfo::pVulkanFunctions.
2782 */
2783 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2784 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2785 #endif
2786 
2787 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2788 //#define VMA_USE_STL_CONTAINERS 1
2789 
2790 /* Set this macro to 1 to make the library include and use STL containers:
2791 std::pair, std::vector, std::list, std::unordered_map.
2792 
2793 Set it to 0 or leave it undefined to make the library use its own implementation of
2794 the containers.
2795 */
2796 #if VMA_USE_STL_CONTAINERS
2797  #define VMA_USE_STL_VECTOR 1
2798  #define VMA_USE_STL_UNORDERED_MAP 1
2799  #define VMA_USE_STL_LIST 1
2800 #endif
2801 
2802 #if VMA_USE_STL_VECTOR
2803  #include <vector>
2804 #endif
2805 
2806 #if VMA_USE_STL_UNORDERED_MAP
2807  #include <unordered_map>
2808 #endif
2809 
2810 #if VMA_USE_STL_LIST
2811  #include <list>
2812 #endif
2813 
2814 /*
2815 The following headers are used in this CONFIGURATION section only, so feel free to
2816 remove them if not needed.
2817 */
2818 #include <cassert> // for assert
2819 #include <algorithm> // for min, max
2820 #include <mutex> // for std::mutex
2821 #include <atomic> // for std::atomic
2822 
2823 #ifndef VMA_NULL
2824  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2825  #define VMA_NULL nullptr
2826 #endif
2827 
2828 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
2829 #include <cstdlib>
2830 void *aligned_alloc(size_t alignment, size_t size)
2831 {
2832  // alignment must be >= sizeof(void*)
2833  if(alignment < sizeof(void*))
2834  {
2835  alignment = sizeof(void*);
2836  }
2837 
2838  return memalign(alignment, size);
2839 }
2840 #elif defined(__APPLE__) || defined(__ANDROID__)
2841 #include <cstdlib>
2842 void *aligned_alloc(size_t alignment, size_t size)
2843 {
2844  // alignment must be >= sizeof(void*)
2845  if(alignment < sizeof(void*))
2846  {
2847  alignment = sizeof(void*);
2848  }
2849 
2850  void *pointer;
2851  if(posix_memalign(&pointer, alignment, size) == 0)
2852  return pointer;
2853  return VMA_NULL;
2854 }
2855 #endif
2856 
2857 // If your compiler is not compatible with C++11 and the definition of the
2858 // aligned_alloc() function is missing, uncommenting the following line may help:
2859 
2860 //#include <malloc.h>
2861 
2862 // Normal assert to check for programmer's errors, especially in Debug configuration.
2863 #ifndef VMA_ASSERT
2864  #ifdef _DEBUG
2865  #define VMA_ASSERT(expr) assert(expr)
2866  #else
2867  #define VMA_ASSERT(expr)
2868  #endif
2869 #endif
2870 
2871 // Assert that will be called very often, e.g. inside data structures like operator[].
2872 // Making it non-empty can make the program slow.
2873 #ifndef VMA_HEAVY_ASSERT
2874  #ifdef _DEBUG
2875  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2876  #else
2877  #define VMA_HEAVY_ASSERT(expr)
2878  #endif
2879 #endif
2880 
2881 #ifndef VMA_ALIGN_OF
2882  #define VMA_ALIGN_OF(type) (__alignof(type))
2883 #endif
2884 
2885 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2886  #if defined(_WIN32)
2887  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2888  #else
2889  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2890  #endif
2891 #endif
2892 
2893 #ifndef VMA_SYSTEM_FREE
2894  #if defined(_WIN32)
2895  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2896  #else
2897  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2898  #endif
2899 #endif
2900 
2901 #ifndef VMA_MIN
2902  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2903 #endif
2904 
2905 #ifndef VMA_MAX
2906  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2907 #endif
2908 
2909 #ifndef VMA_SWAP
2910  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2911 #endif
2912 
2913 #ifndef VMA_SORT
2914  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2915 #endif
2916 
2917 #ifndef VMA_DEBUG_LOG
2918  #define VMA_DEBUG_LOG(format, ...)
2919  /*
2920  #define VMA_DEBUG_LOG(format, ...) do { \
2921  printf(format, __VA_ARGS__); \
2922  printf("\n"); \
2923  } while(false)
2924  */
2925 #endif
2926 
2927 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2928 #if VMA_STATS_STRING_ENABLED
2929  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2930  {
2931  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2932  }
2933  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2934  {
2935  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2936  }
2937  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2938  {
2939  snprintf(outStr, strLen, "%p", ptr);
2940  }
2941 #endif
2942 
2943 #ifndef VMA_MUTEX
2944  class VmaMutex
2945  {
2946  public:
2947  VmaMutex() { }
2948  ~VmaMutex() { }
2949  void Lock() { m_Mutex.lock(); }
2950  void Unlock() { m_Mutex.unlock(); }
2951  private:
2952  std::mutex m_Mutex;
2953  };
2954  #define VMA_MUTEX VmaMutex
2955 #endif
2956 
2957 /*
2958 If providing your own implementation, you need to implement a subset of std::atomic:
2959 
2960 - Constructor(uint32_t desired)
2961 - uint32_t load() const
2962 - void store(uint32_t desired)
2963 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2964 */
2965 #ifndef VMA_ATOMIC_UINT32
2966  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2967 #endif
2968 
2969 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2970 
2974  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2975 #endif
2976 
2977 #ifndef VMA_DEBUG_ALIGNMENT
2978 
2982  #define VMA_DEBUG_ALIGNMENT (1)
2983 #endif
2984 
2985 #ifndef VMA_DEBUG_MARGIN
2986 
2990  #define VMA_DEBUG_MARGIN (0)
2991 #endif
2992 
2993 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2994 
2998  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2999 #endif
3000 
3001 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3002 
3007  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3008 #endif
3009 
3010 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3011 
3015  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3016 #endif
3017 
3018 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3019 
3023  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3024 #endif
3025 
3026 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3027  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3029 #endif
3030 
3031 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3032  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3034 #endif
3035 
3036 #ifndef VMA_CLASS_NO_COPY
3037  #define VMA_CLASS_NO_COPY(className) \
3038  private: \
3039  className(const className&) = delete; \
3040  className& operator=(const className&) = delete;
3041 #endif
3042 
3043 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3044 
3045 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3046 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3047 
3048 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3049 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3050 
3051 /*******************************************************************************
3052 END OF CONFIGURATION
3053 */
3054 
3055 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3056  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3057 
3058 // Returns number of bits set to 1 in (v).
3059 static inline uint32_t VmaCountBitsSet(uint32_t v)
3060 {
3061  uint32_t c = v - ((v >> 1) & 0x55555555);
3062  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3063  c = ((c >> 4) + c) & 0x0F0F0F0F;
3064  c = ((c >> 8) + c) & 0x00FF00FF;
3065  c = ((c >> 16) + c) & 0x0000FFFF;
3066  return c;
3067 }
3068 
3069 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
3070 // Use types like uint32_t, uint64_t as T.
3071 template <typename T>
3072 static inline T VmaAlignUp(T val, T align)
3073 {
3074  return (val + align - 1) / align * align;
3075 }
3076 // Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
3077 // Use types like uint32_t, uint64_t as T.
3078 template <typename T>
3079 static inline T VmaAlignDown(T val, T align)
3080 {
3081  return val / align * align;
3082 }
3083 
3084 // Division with mathematical rounding to the nearest integer.
3085 template <typename T>
3086 static inline T VmaRoundDiv(T x, T y)
3087 {
3088  return (x + (y / (T)2)) / y;
3089 }
3090 
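A few worked values, for reference; these follow directly from the formulas above:

//   VmaAlignUp<uint32_t>(11, 8)   == 16   // (11 + 7) / 8 * 8
//   VmaAlignDown<uint32_t>(11, 8) == 8    // 11 / 8 * 8
//   VmaRoundDiv<uint32_t>(7, 2)   == 4    // (7 + 1) / 2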
3091 /*
3092 Returns true if given number is a power of two.
3093 T must be an unsigned integer, or a signed integer that is always nonnegative.
3094 For 0 returns true.
3095 */
3096 template <typename T>
3097 inline bool VmaIsPow2(T x)
3098 {
3099  return (x & (x-1)) == 0;
3100 }
3101 
3102 // Returns the smallest power of 2 greater than or equal to v.
3103 static inline uint32_t VmaNextPow2(uint32_t v)
3104 {
3105  v--;
3106  v |= v >> 1;
3107  v |= v >> 2;
3108  v |= v >> 4;
3109  v |= v >> 8;
3110  v |= v >> 16;
3111  v++;
3112  return v;
3113 }
3114 static inline uint64_t VmaNextPow2(uint64_t v)
3115 {
3116  v--;
3117  v |= v >> 1;
3118  v |= v >> 2;
3119  v |= v >> 4;
3120  v |= v >> 8;
3121  v |= v >> 16;
3122  v |= v >> 32;
3123  v++;
3124  return v;
3125 }
3126 
3127 // Returns the largest power of 2 less than or equal to v.
3128 static inline uint32_t VmaPrevPow2(uint32_t v)
3129 {
3130  v |= v >> 1;
3131  v |= v >> 2;
3132  v |= v >> 4;
3133  v |= v >> 8;
3134  v |= v >> 16;
3135  v = v ^ (v >> 1);
3136  return v;
3137 }
3138 static inline uint64_t VmaPrevPow2(uint64_t v)
3139 {
3140  v |= v >> 1;
3141  v |= v >> 2;
3142  v |= v >> 4;
3143  v |= v >> 8;
3144  v |= v >> 16;
3145  v |= v >> 32;
3146  v = v ^ (v >> 1);
3147  return v;
3148 }
3149 
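Worked values following from the bit-smearing above:

//   VmaNextPow2(17u) == 32,  VmaNextPow2(32u) == 32
//   VmaPrevPow2(17u) == 16,  VmaPrevPow2(32u) == 32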
3150 static inline bool VmaStrIsEmpty(const char* pStr)
3151 {
3152  return pStr == VMA_NULL || *pStr == '\0';
3153 }
3154 
3155 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3156 {
3157  switch(algorithm)
3158  {
3159  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3160  return "Linear";
3161  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3162  return "Buddy";
3163  case 0:
3164  return "Default";
3165  default:
3166  VMA_ASSERT(0);
3167  return "";
3168  }
3169 }
3170 
3171 #ifndef VMA_SORT
3172 
3173 template<typename Iterator, typename Compare>
3174 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3175 {
3176  Iterator centerValue = end; --centerValue;
3177  Iterator insertIndex = beg;
3178  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3179  {
3180  if(cmp(*memTypeIndex, *centerValue))
3181  {
3182  if(insertIndex != memTypeIndex)
3183  {
3184  VMA_SWAP(*memTypeIndex, *insertIndex);
3185  }
3186  ++insertIndex;
3187  }
3188  }
3189  if(insertIndex != centerValue)
3190  {
3191  VMA_SWAP(*insertIndex, *centerValue);
3192  }
3193  return insertIndex;
3194 }
3195 
3196 template<typename Iterator, typename Compare>
3197 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3198 {
3199  if(beg < end)
3200  {
3201  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3202  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3203  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3204  }
3205 }
3206 
3207 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3208 
3209 #endif // #ifndef VMA_SORT
3210 
3211 /*
3212 Returns true if two memory blocks occupy overlapping pages.
3213 ResourceA must be at a lower memory offset than ResourceB.
3214 
3215 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3216 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3217 */
3218 static inline bool VmaBlocksOnSamePage(
3219  VkDeviceSize resourceAOffset,
3220  VkDeviceSize resourceASize,
3221  VkDeviceSize resourceBOffset,
3222  VkDeviceSize pageSize)
3223 {
3224  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3225  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3226  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3227  VkDeviceSize resourceBStart = resourceBOffset;
3228  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3229  return resourceAEndPage == resourceBStartPage;
3230 }
3231 
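A worked case: with pageSize (bufferImageGranularity) = 1024, resource A at offset 0 with size 1000 ends on page 0, and resource B starting at offset 1000 also begins on page 0, so the function returns true (a granularity conflict is possible). If B instead started at offset 1024, it would begin on page 1 and the function would return false.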
3232 enum VmaSuballocationType
3233 {
3234  VMA_SUBALLOCATION_TYPE_FREE = 0,
3235  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3236  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3237  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3238  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3239  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3240  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3241 };
3242 
3243 /*
3244 Returns true if given suballocation types could conflict and must respect
3245 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3246 or linear image and the other one is an optimal image. If a type is unknown, the function
3247 behaves conservatively.
3248 */
3249 static inline bool VmaIsBufferImageGranularityConflict(
3250  VmaSuballocationType suballocType1,
3251  VmaSuballocationType suballocType2)
3252 {
3253  if(suballocType1 > suballocType2)
3254  {
3255  VMA_SWAP(suballocType1, suballocType2);
3256  }
3257 
3258  switch(suballocType1)
3259  {
3260  case VMA_SUBALLOCATION_TYPE_FREE:
3261  return false;
3262  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3263  return true;
3264  case VMA_SUBALLOCATION_TYPE_BUFFER:
3265  return
3266  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3267  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3268  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3269  return
3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3271  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3272  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3273  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3274  return
3275  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3276  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3277  return false;
3278  default:
3279  VMA_ASSERT(0);
3280  return true;
3281  }
3282 }
3283 
3284 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3285 {
3286  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3287  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3288  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3289  {
3290  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3291  }
3292 }
3293 
3294 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3295 {
3296  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3297  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3298  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3299  {
3300  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3301  {
3302  return false;
3303  }
3304  }
3305  return true;
3306 }
3307 
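These two helpers back the VMA_DEBUG_MARGIN corruption detection: when the margin is nonzero, it is filled with the magic number around every allocation and validated later. A configuration sketch, to be placed in the file that defines VMA_IMPLEMENTATION (the values are assumptions):

#define VMA_DEBUG_MARGIN 16           // 16 bytes of margin around each allocation
#define VMA_DEBUG_DETECT_CORRUPTION 1 // fill margins with the magic value and enable validation
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"

// Later, e.g. once per frame in a debug build:
//   VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);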
3308 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3309 struct VmaMutexLock
3310 {
3311  VMA_CLASS_NO_COPY(VmaMutexLock)
3312 public:
3313  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3314  m_pMutex(useMutex ? &mutex : VMA_NULL)
3315  {
3316  if(m_pMutex)
3317  {
3318  m_pMutex->Lock();
3319  }
3320  }
3321 
3322  ~VmaMutexLock()
3323  {
3324  if(m_pMutex)
3325  {
3326  m_pMutex->Unlock();
3327  }
3328  }
3329 
3330 private:
3331  VMA_MUTEX* m_pMutex;
3332 };
3333 
3334 #if VMA_DEBUG_GLOBAL_MUTEX
3335  static VMA_MUTEX gDebugGlobalMutex;
3336  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3337 #else
3338  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3339 #endif
3340 
3341 // Minimum size of a free suballocation to register it in the free suballocation collection.
3342 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3343 
3344 /*
3345 Performs binary search and returns iterator to first element that is greater or
3346 equal to (key), according to comparison (cmp).
3347 
3348 Cmp should return true if first argument is less than second argument.
3349 
3350 Returned iterator points to the found element if it is present in the collection,
3351 or to the place where a new element with value (key) should be inserted.
3352 */
3353 template <typename CmpLess, typename IterT, typename KeyT>
3354 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3355 {
3356  size_t down = 0, up = (end - beg);
3357  while(down < up)
3358  {
3359  const size_t mid = (down + up) / 2;
3360  if(cmp(*(beg+mid), key))
3361  {
3362  down = mid + 1;
3363  }
3364  else
3365  {
3366  up = mid;
3367  }
3368  }
3369  return beg + down;
3370 }
3371 
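A usage sketch (assuming C++11 lambdas): find the first element not less than 5 in a sorted array; the comparator follows the CmpLess contract described above.

uint32_t sortedVals[] = { 1, 3, 5, 7 };
const uint32_t* found = VmaBinaryFindFirstNotLess(
    sortedVals, sortedVals + 4, 5u,
    [](uint32_t lhs, uint32_t rhs) { return lhs < rhs; });
// found now points at the element with value 5.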
3373 // Memory allocation
3374 
3375 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3376 {
3377  if((pAllocationCallbacks != VMA_NULL) &&
3378  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3379  {
3380  return (*pAllocationCallbacks->pfnAllocation)(
3381  pAllocationCallbacks->pUserData,
3382  size,
3383  alignment,
3384  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3385  }
3386  else
3387  {
3388  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3389  }
3390 }
3391 
3392 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3393 {
3394  if((pAllocationCallbacks != VMA_NULL) &&
3395  (pAllocationCallbacks->pfnFree != VMA_NULL))
3396  {
3397  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3398  }
3399  else
3400  {
3401  VMA_SYSTEM_FREE(ptr);
3402  }
3403 }
3404 
3405 template<typename T>
3406 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3407 {
3408  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3409 }
3410 
3411 template<typename T>
3412 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3413 {
3414  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3415 }
3416 
3417 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3418 
3419 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3420 
3421 template<typename T>
3422 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3423 {
3424  ptr->~T();
3425  VmaFree(pAllocationCallbacks, ptr);
3426 }
3427 
3428 template<typename T>
3429 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3430 {
3431  if(ptr != VMA_NULL)
3432  {
3433  for(size_t i = count; i--; )
3434  {
3435  ptr[i].~T();
3436  }
3437  VmaFree(pAllocationCallbacks, ptr);
3438  }
3439 }
3440 
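A usage sketch for the macro pair, with a hypothetical type: vma_new placement-constructs into memory obtained from VmaAllocate, and vma_delete runs the destructor before VmaFree, keeping construction and destruction symmetric.

struct Example { int value; Example(int v) : value(v) { } }; // hypothetical type

Example* p = vma_new(pAllocationCallbacks, Example)(42);
// ... use p ...
vma_delete(pAllocationCallbacks, p);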
3441 // STL-compatible allocator.
3442 template<typename T>
3443 class VmaStlAllocator
3444 {
3445 public:
3446  const VkAllocationCallbacks* const m_pCallbacks;
3447  typedef T value_type;
3448 
3449  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3450  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3451 
3452  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3453  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3454 
3455  template<typename U>
3456  bool operator==(const VmaStlAllocator<U>& rhs) const
3457  {
3458  return m_pCallbacks == rhs.m_pCallbacks;
3459  }
3460  template<typename U>
3461  bool operator!=(const VmaStlAllocator<U>& rhs) const
3462  {
3463  return m_pCallbacks != rhs.m_pCallbacks;
3464  }
3465 
3466  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3467 };
3468 
3469 #if VMA_USE_STL_VECTOR
3470 
3471 #define VmaVector std::vector
3472 
3473 template<typename T, typename allocatorT>
3474 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3475 {
3476  vec.insert(vec.begin() + index, item);
3477 }
3478 
3479 template<typename T, typename allocatorT>
3480 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3481 {
3482  vec.erase(vec.begin() + index);
3483 }
3484 
3485 #else // #if VMA_USE_STL_VECTOR
3486 
3487 /* Class with interface compatible with subset of std::vector.
3488 T must be POD because constructors and destructors are not called and memcpy is
3489 used for these objects. */
3490 template<typename T, typename AllocatorT>
3491 class VmaVector
3492 {
3493 public:
3494  typedef T value_type;
3495 
3496  VmaVector(const AllocatorT& allocator) :
3497  m_Allocator(allocator),
3498  m_pArray(VMA_NULL),
3499  m_Count(0),
3500  m_Capacity(0)
3501  {
3502  }
3503 
3504  VmaVector(size_t count, const AllocatorT& allocator) :
3505  m_Allocator(allocator),
3506  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3507  m_Count(count),
3508  m_Capacity(count)
3509  {
3510  }
3511 
3512  VmaVector(const VmaVector<T, AllocatorT>& src) :
3513  m_Allocator(src.m_Allocator),
3514  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3515  m_Count(src.m_Count),
3516  m_Capacity(src.m_Count)
3517  {
3518  if(m_Count != 0)
3519  {
3520  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3521  }
3522  }
3523 
3524  ~VmaVector()
3525  {
3526  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3527  }
3528 
3529  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3530  {
3531  if(&rhs != this)
3532  {
3533  resize(rhs.m_Count);
3534  if(m_Count != 0)
3535  {
3536  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3537  }
3538  }
3539  return *this;
3540  }
3541 
3542  bool empty() const { return m_Count == 0; }
3543  size_t size() const { return m_Count; }
3544  T* data() { return m_pArray; }
3545  const T* data() const { return m_pArray; }
3546 
3547  T& operator[](size_t index)
3548  {
3549  VMA_HEAVY_ASSERT(index < m_Count);
3550  return m_pArray[index];
3551  }
3552  const T& operator[](size_t index) const
3553  {
3554  VMA_HEAVY_ASSERT(index < m_Count);
3555  return m_pArray[index];
3556  }
3557 
3558  T& front()
3559  {
3560  VMA_HEAVY_ASSERT(m_Count > 0);
3561  return m_pArray[0];
3562  }
3563  const T& front() const
3564  {
3565  VMA_HEAVY_ASSERT(m_Count > 0);
3566  return m_pArray[0];
3567  }
3568  T& back()
3569  {
3570  VMA_HEAVY_ASSERT(m_Count > 0);
3571  return m_pArray[m_Count - 1];
3572  }
3573  const T& back() const
3574  {
3575  VMA_HEAVY_ASSERT(m_Count > 0);
3576  return m_pArray[m_Count - 1];
3577  }
3578 
3579  void reserve(size_t newCapacity, bool freeMemory = false)
3580  {
3581  newCapacity = VMA_MAX(newCapacity, m_Count);
3582 
3583  if((newCapacity < m_Capacity) && !freeMemory)
3584  {
3585  newCapacity = m_Capacity;
3586  }
3587 
3588  if(newCapacity != m_Capacity)
3589  {
3590  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3591  if(m_Count != 0)
3592  {
3593  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3594  }
3595  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3596  m_Capacity = newCapacity;
3597  m_pArray = newArray;
3598  }
3599  }
3600 
3601  void resize(size_t newCount, bool freeMemory = false)
3602  {
3603  size_t newCapacity = m_Capacity;
3604  if(newCount > m_Capacity)
3605  {
3606  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3607  }
3608  else if(freeMemory)
3609  {
3610  newCapacity = newCount;
3611  }
3612 
3613  if(newCapacity != m_Capacity)
3614  {
3615  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3616  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3617  if(elementsToCopy != 0)
3618  {
3619  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3620  }
3621  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3622  m_Capacity = newCapacity;
3623  m_pArray = newArray;
3624  }
3625 
3626  m_Count = newCount;
3627  }
3628 
3629  void clear(bool freeMemory = false)
3630  {
3631  resize(0, freeMemory);
3632  }
3633 
3634  void insert(size_t index, const T& src)
3635  {
3636  VMA_HEAVY_ASSERT(index <= m_Count);
3637  const size_t oldCount = size();
3638  resize(oldCount + 1);
3639  if(index < oldCount)
3640  {
3641  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3642  }
3643  m_pArray[index] = src;
3644  }
3645 
3646  void remove(size_t index)
3647  {
3648  VMA_HEAVY_ASSERT(index < m_Count);
3649  const size_t oldCount = size();
3650  if(index < oldCount - 1)
3651  {
3652  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3653  }
3654  resize(oldCount - 1);
3655  }
3656 
3657  void push_back(const T& src)
3658  {
3659  const size_t newIndex = size();
3660  resize(newIndex + 1);
3661  m_pArray[newIndex] = src;
3662  }
3663 
3664  void pop_back()
3665  {
3666  VMA_HEAVY_ASSERT(m_Count > 0);
3667  resize(size() - 1);
3668  }
3669 
3670  void push_front(const T& src)
3671  {
3672  insert(0, src);
3673  }
3674 
3675  void pop_front()
3676  {
3677  VMA_HEAVY_ASSERT(m_Count > 0);
3678  remove(0);
3679  }
3680 
3681  typedef T* iterator;
3682 
3683  iterator begin() { return m_pArray; }
3684  iterator end() { return m_pArray + m_Count; }
3685 
3686 private:
3687  AllocatorT m_Allocator;
3688  T* m_pArray;
3689  size_t m_Count;
3690  size_t m_Capacity;
3691 };
3692 
3693 template<typename T, typename allocatorT>
3694 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
3695 {
3696  vec.insert(index, item);
3697 }
3698 
3699 template<typename T, typename allocatorT>
3700 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
3701 {
3702  vec.remove(index);
3703 }
3704 
3705 #endif // #if VMA_USE_STL_VECTOR
3706 
3707 template<typename CmpLess, typename VectorT>
3708 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
3709 {
3710  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3711  vector.data(),
3712  vector.data() + vector.size(),
3713  value,
3714  CmpLess()) - vector.data();
3715  VmaVectorInsert(vector, indexToInsert, value);
3716  return indexToInsert;
3717 }
3718 
3719 template<typename CmpLess, typename VectorT>
3720 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
3721 {
3722  CmpLess comparator;
3723  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
3724  vector.begin(),
3725  vector.end(),
3726  value,
3727  comparator);
3728  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
3729  {
3730  size_t indexToRemove = it - vector.begin();
3731  VmaVectorRemove(vector, indexToRemove);
3732  return true;
3733  }
3734  return false;
3735 }
3736 
3737 template<typename CmpLess, typename IterT, typename KeyT>
3738 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3739 {
3740  CmpLess comparator;
3741  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3742  beg, end, value, comparator);
3743  if(it == end ||
3744  (!comparator(*it, value) && !comparator(value, *it)))
3745  {
3746  return it;
3747  }
3748  return end;
3749 }
3750 
3752 // class VmaPoolAllocator
3753 
3754 /*
3755 Allocator for objects of type T using a list of arrays (pools) to speed up
3756 allocation. The number of elements that can be allocated is not bounded, because the
3757 allocator can create multiple blocks.
3758 */
3759 template<typename T>
3760 class VmaPoolAllocator
3761 {
3762  VMA_CLASS_NO_COPY(VmaPoolAllocator)
3763 public:
3764  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
3765  ~VmaPoolAllocator();
3766  void Clear();
3767  T* Alloc();
3768  void Free(T* ptr);
3769 
3770 private:
3771  union Item
3772  {
3773  uint32_t NextFreeIndex;
3774  T Value;
3775  };
3776 
3777  struct ItemBlock
3778  {
3779  Item* pItems;
3780  uint32_t FirstFreeIndex;
3781  };
3782 
3783  const VkAllocationCallbacks* m_pAllocationCallbacks;
3784  size_t m_ItemsPerBlock;
3785  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
3786 
3787  ItemBlock& CreateNewBlock();
3788 };
3789 
3790 template<typename T>
3791 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
3792  m_pAllocationCallbacks(pAllocationCallbacks),
3793  m_ItemsPerBlock(itemsPerBlock),
3794  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
3795 {
3796  VMA_ASSERT(itemsPerBlock > 0);
3797 }
3798 
3799 template<typename T>
3800 VmaPoolAllocator<T>::~VmaPoolAllocator()
3801 {
3802  Clear();
3803 }
3804 
3805 template<typename T>
3806 void VmaPoolAllocator<T>::Clear()
3807 {
3808  for(size_t i = m_ItemBlocks.size(); i--; )
3809  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3810  m_ItemBlocks.clear();
3811 }
3812 
3813 template<typename T>
3814 T* VmaPoolAllocator<T>::Alloc()
3815 {
3816  for(size_t i = m_ItemBlocks.size(); i--; )
3817  {
3818  ItemBlock& block = m_ItemBlocks[i];
3819  // This block has some free items: Use first one.
3820  if(block.FirstFreeIndex != UINT32_MAX)
3821  {
3822  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3823  block.FirstFreeIndex = pItem->NextFreeIndex;
3824  return &pItem->Value;
3825  }
3826  }
3827 
3828  // No block has free item: Create new one and use it.
3829  ItemBlock& newBlock = CreateNewBlock();
3830  Item* const pItem = &newBlock.pItems[0];
3831  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3832  return &pItem->Value;
3833 }
3834 
3835 template<typename T>
3836 void VmaPoolAllocator<T>::Free(T* ptr)
3837 {
3838  // Search all memory blocks to find ptr.
3839  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3840  {
3841  ItemBlock& block = m_ItemBlocks[i];
3842 
3843  // Casting to union.
3844  Item* pItemPtr;
3845  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3846 
3847  // Check if pItemPtr is in address range of this block.
3848  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3849  {
3850  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3851  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3852  block.FirstFreeIndex = index;
3853  return;
3854  }
3855  }
3856  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3857 }
3858 
3859 template<typename T>
3860 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3861 {
3862  ItemBlock newBlock = {
3863  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3864 
3865  m_ItemBlocks.push_back(newBlock);
3866 
3867  // Setup singly-linked list of all free items in this block.
3868  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3869  newBlock.pItems[i].NextFreeIndex = i + 1;
3870  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3871  return m_ItemBlocks.back();
3872 }
3873 
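A usage sketch; 32 items per block is an arbitrary choice. Pointers returned by Alloc() stay stable because existing item arrays are never reallocated, only new blocks are appended.

VmaPoolAllocator<uint32_t> itemPool(pAllocationCallbacks, 32);
uint32_t* item = itemPool.Alloc(); // note: the constructor of T is not called
*item = 123;
itemPool.Free(item);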
3875 // class VmaRawList, VmaList
3876 
3877 #if VMA_USE_STL_LIST
3878 
3879 #define VmaList std::list
3880 
3881 #else // #if VMA_USE_STL_LIST
3882 
3883 template<typename T>
3884 struct VmaListItem
3885 {
3886  VmaListItem* pPrev;
3887  VmaListItem* pNext;
3888  T Value;
3889 };
3890 
3891 // Doubly linked list.
3892 template<typename T>
3893 class VmaRawList
3894 {
3895  VMA_CLASS_NO_COPY(VmaRawList)
3896 public:
3897  typedef VmaListItem<T> ItemType;
3898 
3899  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
3900  ~VmaRawList();
3901  void Clear();
3902 
3903  size_t GetCount() const { return m_Count; }
3904  bool IsEmpty() const { return m_Count == 0; }
3905 
3906  ItemType* Front() { return m_pFront; }
3907  const ItemType* Front() const { return m_pFront; }
3908  ItemType* Back() { return m_pBack; }
3909  const ItemType* Back() const { return m_pBack; }
3910 
3911  ItemType* PushBack();
3912  ItemType* PushFront();
3913  ItemType* PushBack(const T& value);
3914  ItemType* PushFront(const T& value);
3915  void PopBack();
3916  void PopFront();
3917 
3918  // Item can be null - it means PushBack.
3919  ItemType* InsertBefore(ItemType* pItem);
3920  // Item can be null - it means PushFront.
3921  ItemType* InsertAfter(ItemType* pItem);
3922 
3923  ItemType* InsertBefore(ItemType* pItem, const T& value);
3924  ItemType* InsertAfter(ItemType* pItem, const T& value);
3925 
3926  void Remove(ItemType* pItem);
3927 
3928 private:
3929  const VkAllocationCallbacks* const m_pAllocationCallbacks;
3930  VmaPoolAllocator<ItemType> m_ItemAllocator;
3931  ItemType* m_pFront;
3932  ItemType* m_pBack;
3933  size_t m_Count;
3934 };
3935 
3936 template<typename T>
3937 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
3938  m_pAllocationCallbacks(pAllocationCallbacks),
3939  m_ItemAllocator(pAllocationCallbacks, 128),
3940  m_pFront(VMA_NULL),
3941  m_pBack(VMA_NULL),
3942  m_Count(0)
3943 {
3944 }
3945 
3946 template<typename T>
3947 VmaRawList<T>::~VmaRawList()
3948 {
3949 // Intentionally not calling Clear, because that would do unnecessary work
3950 // returning all items to m_ItemAllocator as free.
3951 }
3952 
3953 template<typename T>
3954 void VmaRawList<T>::Clear()
3955 {
3956  if(IsEmpty() == false)
3957  {
3958  ItemType* pItem = m_pBack;
3959  while(pItem != VMA_NULL)
3960  {
3961  ItemType* const pPrevItem = pItem->pPrev;
3962  m_ItemAllocator.Free(pItem);
3963  pItem = pPrevItem;
3964  }
3965  m_pFront = VMA_NULL;
3966  m_pBack = VMA_NULL;
3967  m_Count = 0;
3968  }
3969 }
3970 
3971 template<typename T>
3972 VmaListItem<T>* VmaRawList<T>::PushBack()
3973 {
3974  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3975  pNewItem->pNext = VMA_NULL;
3976  if(IsEmpty())
3977  {
3978  pNewItem->pPrev = VMA_NULL;
3979  m_pFront = pNewItem;
3980  m_pBack = pNewItem;
3981  m_Count = 1;
3982  }
3983  else
3984  {
3985  pNewItem->pPrev = m_pBack;
3986  m_pBack->pNext = pNewItem;
3987  m_pBack = pNewItem;
3988  ++m_Count;
3989  }
3990  return pNewItem;
3991 }
3992 
3993 template<typename T>
3994 VmaListItem<T>* VmaRawList<T>::PushFront()
3995 {
3996  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3997  pNewItem->pPrev = VMA_NULL;
3998  if(IsEmpty())
3999  {
4000  pNewItem->pNext = VMA_NULL;
4001  m_pFront = pNewItem;
4002  m_pBack = pNewItem;
4003  m_Count = 1;
4004  }
4005  else
4006  {
4007  pNewItem->pNext = m_pFront;
4008  m_pFront->pPrev = pNewItem;
4009  m_pFront = pNewItem;
4010  ++m_Count;
4011  }
4012  return pNewItem;
4013 }
4014 
4015 template<typename T>
4016 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4017 {
4018  ItemType* const pNewItem = PushBack();
4019  pNewItem->Value = value;
4020  return pNewItem;
4021 }
4022 
4023 template<typename T>
4024 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4025 {
4026  ItemType* const pNewItem = PushFront();
4027  pNewItem->Value = value;
4028  return pNewItem;
4029 }
4030 
4031 template<typename T>
4032 void VmaRawList<T>::PopBack()
4033 {
4034  VMA_HEAVY_ASSERT(m_Count > 0);
4035  ItemType* const pBackItem = m_pBack;
4036  ItemType* const pPrevItem = pBackItem->pPrev;
4037  if(pPrevItem != VMA_NULL)
4038  {
4039  pPrevItem->pNext = VMA_NULL;
4040  }
4041  m_pBack = pPrevItem;
4042  m_ItemAllocator.Free(pBackItem);
4043  --m_Count;
4044 }
4045 
4046 template<typename T>
4047 void VmaRawList<T>::PopFront()
4048 {
4049  VMA_HEAVY_ASSERT(m_Count > 0);
4050  ItemType* const pFrontItem = m_pFront;
4051  ItemType* const pNextItem = pFrontItem->pNext;
4052  if(pNextItem != VMA_NULL)
4053  {
4054  pNextItem->pPrev = VMA_NULL;
4055  }
4056  m_pFront = pNextItem;
4057  m_ItemAllocator.Free(pFrontItem);
4058  --m_Count;
4059 }
4060 
4061 template<typename T>
4062 void VmaRawList<T>::Remove(ItemType* pItem)
4063 {
4064  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4065  VMA_HEAVY_ASSERT(m_Count > 0);
4066 
4067  if(pItem->pPrev != VMA_NULL)
4068  {
4069  pItem->pPrev->pNext = pItem->pNext;
4070  }
4071  else
4072  {
4073  VMA_HEAVY_ASSERT(m_pFront == pItem);
4074  m_pFront = pItem->pNext;
4075  }
4076 
4077  if(pItem->pNext != VMA_NULL)
4078  {
4079  pItem->pNext->pPrev = pItem->pPrev;
4080  }
4081  else
4082  {
4083  VMA_HEAVY_ASSERT(m_pBack == pItem);
4084  m_pBack = pItem->pPrev;
4085  }
4086 
4087  m_ItemAllocator.Free(pItem);
4088  --m_Count;
4089 }
4090 
4091 template<typename T>
4092 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4093 {
4094  if(pItem != VMA_NULL)
4095  {
4096  ItemType* const prevItem = pItem->pPrev;
4097  ItemType* const newItem = m_ItemAllocator.Alloc();
4098  newItem->pPrev = prevItem;
4099  newItem->pNext = pItem;
4100  pItem->pPrev = newItem;
4101  if(prevItem != VMA_NULL)
4102  {
4103  prevItem->pNext = newItem;
4104  }
4105  else
4106  {
4107  VMA_HEAVY_ASSERT(m_pFront == pItem);
4108  m_pFront = newItem;
4109  }
4110  ++m_Count;
4111  return newItem;
4112  }
4113  else
4114  return PushBack();
4115 }
4116 
4117 template<typename T>
4118 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4119 {
4120  if(pItem != VMA_NULL)
4121  {
4122  ItemType* const nextItem = pItem->pNext;
4123  ItemType* const newItem = m_ItemAllocator.Alloc();
4124  newItem->pNext = nextItem;
4125  newItem->pPrev = pItem;
4126  pItem->pNext = newItem;
4127  if(nextItem != VMA_NULL)
4128  {
4129  nextItem->pPrev = newItem;
4130  }
4131  else
4132  {
4133  VMA_HEAVY_ASSERT(m_pBack == pItem);
4134  m_pBack = newItem;
4135  }
4136  ++m_Count;
4137  return newItem;
4138  }
4139  else
4140  return PushFront();
4141 }
4142 
4143 template<typename T>
4144 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4145 {
4146  ItemType* const newItem = InsertBefore(pItem);
4147  newItem->Value = value;
4148  return newItem;
4149 }
4150 
4151 template<typename T>
4152 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4153 {
4154  ItemType* const newItem = InsertAfter(pItem);
4155  newItem->Value = value;
4156  return newItem;
4157 }
4158 
4159 template<typename T, typename AllocatorT>
4160 class VmaList
4161 {
4162  VMA_CLASS_NO_COPY(VmaList)
4163 public:
4164  class iterator
4165  {
4166  public:
4167  iterator() :
4168  m_pList(VMA_NULL),
4169  m_pItem(VMA_NULL)
4170  {
4171  }
4172 
4173  T& operator*() const
4174  {
4175  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4176  return m_pItem->Value;
4177  }
4178  T* operator->() const
4179  {
4180  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4181  return &m_pItem->Value;
4182  }
4183 
4184  iterator& operator++()
4185  {
4186  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4187  m_pItem = m_pItem->pNext;
4188  return *this;
4189  }
4190  iterator& operator--()
4191  {
4192  if(m_pItem != VMA_NULL)
4193  {
4194  m_pItem = m_pItem->pPrev;
4195  }
4196  else
4197  {
4198  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4199  m_pItem = m_pList->Back();
4200  }
4201  return *this;
4202  }
4203 
4204  iterator operator++(int)
4205  {
4206  iterator result = *this;
4207  ++*this;
4208  return result;
4209  }
4210  iterator operator--(int)
4211  {
4212  iterator result = *this;
4213  --*this;
4214  return result;
4215  }
4216 
4217  bool operator==(const iterator& rhs) const
4218  {
4219  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4220  return m_pItem == rhs.m_pItem;
4221  }
4222  bool operator!=(const iterator& rhs) const
4223  {
4224  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4225  return m_pItem != rhs.m_pItem;
4226  }
4227 
4228  private:
4229  VmaRawList<T>* m_pList;
4230  VmaListItem<T>* m_pItem;
4231 
4232  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4233  m_pList(pList),
4234  m_pItem(pItem)
4235  {
4236  }
4237 
4238  friend class VmaList<T, AllocatorT>;
4239  };
4240 
4241  class const_iterator
4242  {
4243  public:
4244  const_iterator() :
4245  m_pList(VMA_NULL),
4246  m_pItem(VMA_NULL)
4247  {
4248  }
4249 
4250  const_iterator(const iterator& src) :
4251  m_pList(src.m_pList),
4252  m_pItem(src.m_pItem)
4253  {
4254  }
4255 
4256  const T& operator*() const
4257  {
4258  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4259  return m_pItem->Value;
4260  }
4261  const T* operator->() const
4262  {
4263  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4264  return &m_pItem->Value;
4265  }
4266 
4267  const_iterator& operator++()
4268  {
4269  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4270  m_pItem = m_pItem->pNext;
4271  return *this;
4272  }
4273  const_iterator& operator--()
4274  {
4275  if(m_pItem != VMA_NULL)
4276  {
4277  m_pItem = m_pItem->pPrev;
4278  }
4279  else
4280  {
4281  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4282  m_pItem = m_pList->Back();
4283  }
4284  return *this;
4285  }
4286 
4287  const_iterator operator++(int)
4288  {
4289  const_iterator result = *this;
4290  ++*this;
4291  return result;
4292  }
4293  const_iterator operator--(int)
4294  {
4295  const_iterator result = *this;
4296  --*this;
4297  return result;
4298  }
4299 
4300  bool operator==(const const_iterator& rhs) const
4301  {
4302  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4303  return m_pItem == rhs.m_pItem;
4304  }
4305  bool operator!=(const const_iterator& rhs) const
4306  {
4307  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4308  return m_pItem != rhs.m_pItem;
4309  }
4310 
4311  private:
4312  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4313  m_pList(pList),
4314  m_pItem(pItem)
4315  {
4316  }
4317 
4318  const VmaRawList<T>* m_pList;
4319  const VmaListItem<T>* m_pItem;
4320 
4321  friend class VmaList<T, AllocatorT>;
4322  };
4323 
4324  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4325 
4326  bool empty() const { return m_RawList.IsEmpty(); }
4327  size_t size() const { return m_RawList.GetCount(); }
4328 
4329  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4330  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4331 
4332  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4333  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4334 
4335  void clear() { m_RawList.Clear(); }
4336  void push_back(const T& value) { m_RawList.PushBack(value); }
4337  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4338  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4339 
4340 private:
4341  VmaRawList<T> m_RawList;
4342 };
4343 
4344 #endif // #if VMA_USE_STL_LIST
4345 
4347 // class VmaMap
4348 
4349 // Unused in this version.
4350 #if 0
4351 
4352 #if VMA_USE_STL_UNORDERED_MAP
4353 
4354 #define VmaPair std::pair
4355 
4356 #define VMA_MAP_TYPE(KeyT, ValueT) \
4357  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4358 
4359 #else // #if VMA_USE_STL_UNORDERED_MAP
4360 
4361 template<typename T1, typename T2>
4362 struct VmaPair
4363 {
4364  T1 first;
4365  T2 second;
4366 
4367  VmaPair() : first(), second() { }
4368  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4369 };
4370 
4371 /* Class compatible with subset of interface of std::unordered_map.
4372 KeyT, ValueT must be POD because they will be stored in VmaVector.
4373 */
4374 template<typename KeyT, typename ValueT>
4375 class VmaMap
4376 {
4377 public:
4378  typedef VmaPair<KeyT, ValueT> PairType;
4379  typedef PairType* iterator;
4380 
4381  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4382 
4383  iterator begin() { return m_Vector.begin(); }
4384  iterator end() { return m_Vector.end(); }
4385 
4386  void insert(const PairType& pair);
4387  iterator find(const KeyT& key);
4388  void erase(iterator it);
4389 
4390 private:
4391  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4392 };
4393 
4394 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4395 
4396 template<typename FirstT, typename SecondT>
4397 struct VmaPairFirstLess
4398 {
4399  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4400  {
4401  return lhs.first < rhs.first;
4402  }
4403  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4404  {
4405  return lhs.first < rhsFirst;
4406  }
4407 };
4408 
4409 template<typename KeyT, typename ValueT>
4410 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4411 {
4412  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4413  m_Vector.data(),
4414  m_Vector.data() + m_Vector.size(),
4415  pair,
4416  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4417  VmaVectorInsert(m_Vector, indexToInsert, pair);
4418 }
4419 
4420 template<typename KeyT, typename ValueT>
4421 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4422 {
4423  PairType* it = VmaBinaryFindFirstNotLess(
4424  m_Vector.data(),
4425  m_Vector.data() + m_Vector.size(),
4426  key,
4427  VmaPairFirstLess<KeyT, ValueT>());
4428  if((it != m_Vector.end()) && (it->first == key))
4429  {
4430  return it;
4431  }
4432  else
4433  {
4434  return m_Vector.end();
4435  }
4436 }
4437 
4438 template<typename KeyT, typename ValueT>
4439 void VmaMap<KeyT, ValueT>::erase(iterator it)
4440 {
4441  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4442 }
4443 
4444 #endif // #if VMA_USE_STL_UNORDERED_MAP
4445 
4446 #endif // #if 0
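
// A standalone sketch (illustration only, not part of the library) of the
// technique VmaMap above implements: key-value pairs kept in a vector sorted
// by key, with binary search for both insertion and lookup. std::lower_bound
// stands in for VmaBinaryFindFirstNotLess; assumes <vector> and <algorithm>
// are available.
template<typename KeyT, typename ValueT>
struct VmaExampleSortedVectorMap
{
    std::vector< std::pair<KeyT, ValueT> > items; // Always sorted by .first.

    void insert(const KeyT& key, const ValueT& value)
    {
        // Find the first element whose key is not less than the new key.
        auto it = std::lower_bound(items.begin(), items.end(), std::make_pair(key, value),
            [](const std::pair<KeyT, ValueT>& lhs, const std::pair<KeyT, ValueT>& rhs)
                { return lhs.first < rhs.first; });
        items.insert(it, std::make_pair(key, value));
    }

    ValueT* find(const KeyT& key)
    {
        auto it = std::lower_bound(items.begin(), items.end(), key,
            [](const std::pair<KeyT, ValueT>& lhs, const KeyT& rhsKey)
                { return lhs.first < rhsKey; });
        return (it != items.end() && it->first == key) ? &it->second : VMA_NULL;
    }
};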
4447 
4449 
4450 class VmaDeviceMemoryBlock;
4451 
4452 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4453 
4454 struct VmaAllocation_T
4455 {
4456  VMA_CLASS_NO_COPY(VmaAllocation_T)
4457 private:
4458  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4459 
4460  enum FLAGS
4461  {
4462  FLAG_USER_DATA_STRING = 0x01,
4463  };
4464 
4465 public:
4466  enum ALLOCATION_TYPE
4467  {
4468  ALLOCATION_TYPE_NONE,
4469  ALLOCATION_TYPE_BLOCK,
4470  ALLOCATION_TYPE_DEDICATED,
4471  };
4472 
4473  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4474  m_Alignment(1),
4475  m_Size(0),
4476  m_pUserData(VMA_NULL),
4477  m_LastUseFrameIndex(currentFrameIndex),
4478  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4479  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4480  m_MapCount(0),
4481  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4482  {
4483 #if VMA_STATS_STRING_ENABLED
4484  m_CreationFrameIndex = currentFrameIndex;
4485  m_BufferImageUsage = 0;
4486 #endif
4487  }
4488 
4489  ~VmaAllocation_T()
4490  {
4491  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4492 
4493  // Check if owned string was freed.
4494  VMA_ASSERT(m_pUserData == VMA_NULL);
4495  }
4496 
4497  void InitBlockAllocation(
4498  VmaPool hPool,
4499  VmaDeviceMemoryBlock* block,
4500  VkDeviceSize offset,
4501  VkDeviceSize alignment,
4502  VkDeviceSize size,
4503  VmaSuballocationType suballocationType,
4504  bool mapped,
4505  bool canBecomeLost)
4506  {
4507  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4508  VMA_ASSERT(block != VMA_NULL);
4509  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4510  m_Alignment = alignment;
4511  m_Size = size;
4512  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4513  m_SuballocationType = (uint8_t)suballocationType;
4514  m_BlockAllocation.m_hPool = hPool;
4515  m_BlockAllocation.m_Block = block;
4516  m_BlockAllocation.m_Offset = offset;
4517  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4518  }
4519 
4520  void InitLost()
4521  {
4522  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4523  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4524  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4525  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4526  m_BlockAllocation.m_Block = VMA_NULL;
4527  m_BlockAllocation.m_Offset = 0;
4528  m_BlockAllocation.m_CanBecomeLost = true;
4529  }
4530 
4531  void ChangeBlockAllocation(
4532  VmaAllocator hAllocator,
4533  VmaDeviceMemoryBlock* block,
4534  VkDeviceSize offset);
4535 
4536  void ChangeSize(VkDeviceSize newSize);
4537 
4538  // A non-null pMappedData means the allocation was created with the MAPPED flag.
4539  void InitDedicatedAllocation(
4540  uint32_t memoryTypeIndex,
4541  VkDeviceMemory hMemory,
4542  VmaSuballocationType suballocationType,
4543  void* pMappedData,
4544  VkDeviceSize size)
4545  {
4546  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4547  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4548  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4549  m_Alignment = 0;
4550  m_Size = size;
4551  m_SuballocationType = (uint8_t)suballocationType;
4552  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4553  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4554  m_DedicatedAllocation.m_hMemory = hMemory;
4555  m_DedicatedAllocation.m_pMappedData = pMappedData;
4556  }
4557 
4558  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4559  VkDeviceSize GetAlignment() const { return m_Alignment; }
4560  VkDeviceSize GetSize() const { return m_Size; }
4561  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4562  void* GetUserData() const { return m_pUserData; }
4563  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4564  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4565 
4566  VmaDeviceMemoryBlock* GetBlock() const
4567  {
4568  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4569  return m_BlockAllocation.m_Block;
4570  }
4571  VkDeviceSize GetOffset() const;
4572  VkDeviceMemory GetMemory() const;
4573  uint32_t GetMemoryTypeIndex() const;
4574  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4575  void* GetMappedData() const;
4576  bool CanBecomeLost() const;
4577  VmaPool GetPool() const;
4578 
4579  uint32_t GetLastUseFrameIndex() const
4580  {
4581  return m_LastUseFrameIndex.load();
4582  }
4583  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4584  {
4585  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4586  }
4587  /*
4588  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4589  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4590  - Else, returns false.
4591 
4592  If hAllocation is already lost, this function asserts - you should not call it then.
4593  If hAllocation was not created with CAN_BECOME_LOST_BIT, it also asserts.
4594  */
4595  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4596 
4597  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4598  {
4599  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4600  outInfo.blockCount = 1;
4601  outInfo.allocationCount = 1;
4602  outInfo.unusedRangeCount = 0;
4603  outInfo.usedBytes = m_Size;
4604  outInfo.unusedBytes = 0;
4605  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4606  outInfo.unusedRangeSizeMin = UINT64_MAX;
4607  outInfo.unusedRangeSizeMax = 0;
4608  }
4609 
4610  void BlockAllocMap();
4611  void BlockAllocUnmap();
4612  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4613  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4614 
4615 #if VMA_STATS_STRING_ENABLED
4616  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4617  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4618 
4619  void InitBufferImageUsage(uint32_t bufferImageUsage)
4620  {
4621  VMA_ASSERT(m_BufferImageUsage == 0);
4622  m_BufferImageUsage = bufferImageUsage;
4623  }
4624 
4625  void PrintParameters(class VmaJsonWriter& json) const;
4626 #endif
4627 
4628 private:
4629  VkDeviceSize m_Alignment;
4630  VkDeviceSize m_Size;
4631  void* m_pUserData;
4632  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4633  uint8_t m_Type; // ALLOCATION_TYPE
4634  uint8_t m_SuballocationType; // VmaSuballocationType
4635  // Bit 0x80 is set when the allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4636  // Bits with mask 0x7F are a reference counter for vmaMapMemory()/vmaUnmapMemory().
4637  uint8_t m_MapCount;
4638  uint8_t m_Flags; // enum FLAGS
4639 
4640  // Allocation out of VmaDeviceMemoryBlock.
4641  struct BlockAllocation
4642  {
4643  VmaPool m_hPool; // Null if the allocation belongs to general memory.
4644  VmaDeviceMemoryBlock* m_Block;
4645  VkDeviceSize m_Offset;
4646  bool m_CanBecomeLost;
4647  };
4648 
4649  // Allocation for an object that has its own private VkDeviceMemory.
4650  struct DedicatedAllocation
4651  {
4652  uint32_t m_MemoryTypeIndex;
4653  VkDeviceMemory m_hMemory;
4654  void* m_pMappedData; // Not null means memory is mapped.
4655  };
4656 
4657  union
4658  {
4659  // Allocation out of VmaDeviceMemoryBlock.
4660  BlockAllocation m_BlockAllocation;
4661  // Allocation for an object that has its own private VkDeviceMemory.
4662  DedicatedAllocation m_DedicatedAllocation;
4663  };
4664 
4665 #if VMA_STATS_STRING_ENABLED
4666  uint32_t m_CreationFrameIndex;
4667  uint32_t m_BufferImageUsage; // 0 if unknown.
4668 #endif
4669 
4670  void FreeUserDataString(VmaAllocator hAllocator);
4671 };
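
// A minimal sketch (illustration only) of the m_MapCount packing used by
// VmaAllocation_T above: bit 0x80 marks a persistently mapped allocation
// (MAP_COUNT_FLAG_PERSISTENT_MAP) and the low 7 bits hold the
// vmaMapMemory()/vmaUnmapMemory() reference count, so both fit in one byte.
static inline bool VmaExampleIsPersistentlyMapped(uint8_t mapCount)
{
    return (mapCount & 0x80) != 0; // The persistent-map flag bit.
}
static inline uint8_t VmaExampleMapRefCount(uint8_t mapCount)
{
    return mapCount & 0x7F; // The 7-bit reference counter.
}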
4672 
4673 /*
4674 Represents a region of a VmaDeviceMemoryBlock that is either assigned and returned
4675 as an allocated memory block, or free.
4676 */
4677 struct VmaSuballocation
4678 {
4679  VkDeviceSize offset;
4680  VkDeviceSize size;
4681  VmaAllocation hAllocation;
4682  VmaSuballocationType type;
4683 };
4684 
4685 // Comparator for offsets.
4686 struct VmaSuballocationOffsetLess
4687 {
4688  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4689  {
4690  return lhs.offset < rhs.offset;
4691  }
4692 };
4693 struct VmaSuballocationOffsetGreater
4694 {
4695  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4696  {
4697  return lhs.offset > rhs.offset;
4698  }
4699 };
4700 
4701 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4702 
4703 // Cost of making one additional allocation lost, expressed in bytes.
4704 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4705 
4706 /*
4707 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4708 
4709 If canMakeOtherLost was false:
4710 - item points to a FREE suballocation.
4711 - itemsToMakeLostCount is 0.
4712 
4713 If canMakeOtherLost was true:
4714 - item points to the first of a sequence of suballocations, which are either FREE,
4715  or point to VmaAllocations that can become lost.
4716 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4717  the requested allocation to succeed.
4718 */
4719 struct VmaAllocationRequest
4720 {
4721  VkDeviceSize offset;
4722  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4723  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4724  VmaSuballocationList::iterator item;
4725  size_t itemsToMakeLostCount;
4726  void* customData;
4727 
4728  VkDeviceSize CalcCost() const
4729  {
4730  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4731  }
4732 };
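
// Worked example (illustration only): with VMA_LOST_ALLOCATION_COST = 1 MB,
// a request overlapping 2 lost-able allocations of 3 MB total has
// CalcCost() = 3 MB + 2 * 1 MB = 5 MB, while a request that fits entirely in
// free space costs 0 - so allocating without making others lost always wins.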
4733 
4734 /*
4735 Data structure used for bookkeeping of allocations and unused ranges of memory
4736 in a single VkDeviceMemory block.
4737 */
4738 class VmaBlockMetadata
4739 {
4740 public:
4741  VmaBlockMetadata(VmaAllocator hAllocator);
4742  virtual ~VmaBlockMetadata() { }
4743  virtual void Init(VkDeviceSize size) { m_Size = size; }
4744 
4745  // Validates all data structures inside this object. If not valid, returns false.
4746  virtual bool Validate() const = 0;
4747  VkDeviceSize GetSize() const { return m_Size; }
4748  virtual size_t GetAllocationCount() const = 0;
4749  virtual VkDeviceSize GetSumFreeSize() const = 0;
4750  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
4751  // Returns true if this block is empty - contains only a single free suballocation.
4752  virtual bool IsEmpty() const = 0;
4753 
4754  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
4755  // Shouldn't modify blockCount.
4756  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
4757 
4758 #if VMA_STATS_STRING_ENABLED
4759  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
4760 #endif
4761 
4762  // Tries to find a place for a suballocation with the given parameters inside this block.
4763  // If succeeded, fills pAllocationRequest and returns true.
4764  // If failed, returns false.
4765  virtual bool CreateAllocationRequest(
4766  uint32_t currentFrameIndex,
4767  uint32_t frameInUseCount,
4768  VkDeviceSize bufferImageGranularity,
4769  VkDeviceSize allocSize,
4770  VkDeviceSize allocAlignment,
4771  bool upperAddress,
4772  VmaSuballocationType allocType,
4773  bool canMakeOtherLost,
4774  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
4775  VmaAllocationRequest* pAllocationRequest) = 0;
4776 
4777  virtual bool MakeRequestedAllocationsLost(
4778  uint32_t currentFrameIndex,
4779  uint32_t frameInUseCount,
4780  VmaAllocationRequest* pAllocationRequest) = 0;
4781 
4782  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
4783 
4784  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
4785 
4786  // Makes actual allocation based on request. Request must already be checked and valid.
4787  virtual void Alloc(
4788  const VmaAllocationRequest& request,
4789  VmaSuballocationType type,
4790  VkDeviceSize allocSize,
4791  bool upperAddress,
4792  VmaAllocation hAllocation) = 0;
4793 
4794  // Frees suballocation assigned to given memory region.
4795  virtual void Free(const VmaAllocation allocation) = 0;
4796  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
4797 
4798  // Tries to resize (grow or shrink) space for given allocation, in place.
4799  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
4800 
4801 protected:
4802  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
4803 
4804 #if VMA_STATS_STRING_ENABLED
4805  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
4806  VkDeviceSize unusedBytes,
4807  size_t allocationCount,
4808  size_t unusedRangeCount) const;
4809  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
4810  VkDeviceSize offset,
4811  VmaAllocation hAllocation) const;
4812  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
4813  VkDeviceSize offset,
4814  VkDeviceSize size) const;
4815  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
4816 #endif
4817 
4818 private:
4819  VkDeviceSize m_Size;
4820  const VkAllocationCallbacks* m_pAllocationCallbacks;
4821 };
4822 
4823 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
4824  VMA_ASSERT(0 && "Validation failed: " #cond); \
4825  return false; \
4826  } } while(false)
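
// A minimal usage sketch (hypothetical function, not part of the library):
// inside a Validate() implementation, VMA_VALIDATE asserts and returns false
// on the first violated invariant, so the checks read as a flat list.
static bool VmaExampleValidateMetadata(const VmaBlockMetadata& metadata)
{
    VMA_VALIDATE(metadata.GetSize() > 0);
    VMA_VALIDATE(metadata.GetSumFreeSize() <= metadata.GetSize());
    return true;
}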
4827 
4828 class VmaBlockMetadata_Generic : public VmaBlockMetadata
4829 {
4830  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
4831 public:
4832  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
4833  virtual ~VmaBlockMetadata_Generic();
4834  virtual void Init(VkDeviceSize size);
4835 
4836  virtual bool Validate() const;
4837  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4838  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4839  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4840  virtual bool IsEmpty() const;
4841 
4842  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4843  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4844 
4845 #if VMA_STATS_STRING_ENABLED
4846  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4847 #endif
4848 
4849  virtual bool CreateAllocationRequest(
4850  uint32_t currentFrameIndex,
4851  uint32_t frameInUseCount,
4852  VkDeviceSize bufferImageGranularity,
4853  VkDeviceSize allocSize,
4854  VkDeviceSize allocAlignment,
4855  bool upperAddress,
4856  VmaSuballocationType allocType,
4857  bool canMakeOtherLost,
4858  uint32_t strategy,
4859  VmaAllocationRequest* pAllocationRequest);
4860 
4861  virtual bool MakeRequestedAllocationsLost(
4862  uint32_t currentFrameIndex,
4863  uint32_t frameInUseCount,
4864  VmaAllocationRequest* pAllocationRequest);
4865 
4866  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4867 
4868  virtual VkResult CheckCorruption(const void* pBlockData);
4869 
4870  virtual void Alloc(
4871  const VmaAllocationRequest& request,
4872  VmaSuballocationType type,
4873  VkDeviceSize allocSize,
4874  bool upperAddress,
4875  VmaAllocation hAllocation);
4876 
4877  virtual void Free(const VmaAllocation allocation);
4878  virtual void FreeAtOffset(VkDeviceSize offset);
4879 
4880  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
4881 
4882 private:
4883  uint32_t m_FreeCount;
4884  VkDeviceSize m_SumFreeSize;
4885  VmaSuballocationList m_Suballocations;
4886  // Suballocations that are free and have size greater than a certain threshold.
4887  // Sorted by size, ascending.
4888  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4889 
4890  bool ValidateFreeSuballocationList() const;
4891 
4892  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
4893  // If yes, fills pOffset and returns true. If no, returns false.
4894  bool CheckAllocation(
4895  uint32_t currentFrameIndex,
4896  uint32_t frameInUseCount,
4897  VkDeviceSize bufferImageGranularity,
4898  VkDeviceSize allocSize,
4899  VkDeviceSize allocAlignment,
4900  VmaSuballocationType allocType,
4901  VmaSuballocationList::const_iterator suballocItem,
4902  bool canMakeOtherLost,
4903  VkDeviceSize* pOffset,
4904  size_t* itemsToMakeLostCount,
4905  VkDeviceSize* pSumFreeSize,
4906  VkDeviceSize* pSumItemSize) const;
4907  // Given a free suballocation, merges it with the following one, which must also be free.
4908  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4909  // Releases given suballocation, making it free.
4910  // Merges it with adjacent free suballocations if applicable.
4911  // Returns iterator to new free suballocation at this place.
4912  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4913  // Given a free suballocation, inserts it into the sorted list
4914  // m_FreeSuballocationsBySize if it is suitable.
4915  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4916  // Given a free suballocation, removes it from the sorted list
4917  // m_FreeSuballocationsBySize if it is suitable.
4918  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4919 };
4920 
4921 /*
4922 Allocations and their references in the internal data structure look like this:
4923 
4924 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4925 
4926  0 +-------+
4927  | |
4928  | |
4929  | |
4930  +-------+
4931  | Alloc | 1st[m_1stNullItemsBeginCount]
4932  +-------+
4933  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4934  +-------+
4935  | ... |
4936  +-------+
4937  | Alloc | 1st[1st.size() - 1]
4938  +-------+
4939  | |
4940  | |
4941  | |
4942 GetSize() +-------+
4943 
4944 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4945 
4946  0 +-------+
4947  | Alloc | 2nd[0]
4948  +-------+
4949  | Alloc | 2nd[1]
4950  +-------+
4951  | ... |
4952  +-------+
4953  | Alloc | 2nd[2nd.size() - 1]
4954  +-------+
4955  | |
4956  | |
4957  | |
4958  +-------+
4959  | Alloc | 1st[m_1stNullItemsBeginCount]
4960  +-------+
4961  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4962  +-------+
4963  | ... |
4964  +-------+
4965  | Alloc | 1st[1st.size() - 1]
4966  +-------+
4967  | |
4968 GetSize() +-------+
4969 
4970 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4971 
4972  0 +-------+
4973  | |
4974  | |
4975  | |
4976  +-------+
4977  | Alloc | 1st[m_1stNullItemsBeginCount]
4978  +-------+
4979  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4980  +-------+
4981  | ... |
4982  +-------+
4983  | Alloc | 1st[1st.size() - 1]
4984  +-------+
4985  | |
4986  | |
4987  | |
4988  +-------+
4989  | Alloc | 2nd[2nd.size() - 1]
4990  +-------+
4991  | ... |
4992  +-------+
4993  | Alloc | 2nd[1]
4994  +-------+
4995  | Alloc | 2nd[0]
4996 GetSize() +-------+
4997 
4998 */
4999 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5000 {
5001  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5002 public:
5003  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5004  virtual ~VmaBlockMetadata_Linear();
5005  virtual void Init(VkDeviceSize size);
5006 
5007  virtual bool Validate() const;
5008  virtual size_t GetAllocationCount() const;
5009  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5010  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5011  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5012 
5013  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5014  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5015 
5016 #if VMA_STATS_STRING_ENABLED
5017  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5018 #endif
5019 
5020  virtual bool CreateAllocationRequest(
5021  uint32_t currentFrameIndex,
5022  uint32_t frameInUseCount,
5023  VkDeviceSize bufferImageGranularity,
5024  VkDeviceSize allocSize,
5025  VkDeviceSize allocAlignment,
5026  bool upperAddress,
5027  VmaSuballocationType allocType,
5028  bool canMakeOtherLost,
5029  uint32_t strategy,
5030  VmaAllocationRequest* pAllocationRequest);
5031 
5032  virtual bool MakeRequestedAllocationsLost(
5033  uint32_t currentFrameIndex,
5034  uint32_t frameInUseCount,
5035  VmaAllocationRequest* pAllocationRequest);
5036 
5037  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5038 
5039  virtual VkResult CheckCorruption(const void* pBlockData);
5040 
5041  virtual void Alloc(
5042  const VmaAllocationRequest& request,
5043  VmaSuballocationType type,
5044  VkDeviceSize allocSize,
5045  bool upperAddress,
5046  VmaAllocation hAllocation);
5047 
5048  virtual void Free(const VmaAllocation allocation);
5049  virtual void FreeAtOffset(VkDeviceSize offset);
5050 
5051 private:
5052  /*
5053  There are two suballocation vectors, used in ping-pong way.
5054  The one with index m_1stVectorIndex is called 1st.
5055  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5056  2nd can be non-empty only when 1st is not empty.
5057  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5058  */
5059  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5060 
5061  enum SECOND_VECTOR_MODE
5062  {
5063  SECOND_VECTOR_EMPTY,
5064  /*
5065  Suballocations in 2nd vector are created later than the ones in 1st, but they
5066  all have smaller offsets.
5067  */
5068  SECOND_VECTOR_RING_BUFFER,
5069  /*
5070  Suballocations in 2nd vector are upper side of double stack.
5071  They all have offsets higher than those in 1st vector.
5072  Top of this stack means smaller offsets, but higher indices in this vector.
5073  */
5074  SECOND_VECTOR_DOUBLE_STACK,
5075  };
5076 
5077  VkDeviceSize m_SumFreeSize;
5078  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5079  uint32_t m_1stVectorIndex;
5080  SECOND_VECTOR_MODE m_2ndVectorMode;
5081 
5082  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5083  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5084  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5085  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5086 
5087  // Number of items in 1st vector with hAllocation = null at the beginning.
5088  size_t m_1stNullItemsBeginCount;
5089  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5090  size_t m_1stNullItemsMiddleCount;
5091  // Number of items in 2nd vector with hAllocation = null.
5092  size_t m_2ndNullItemsCount;
5093 
5094  bool ShouldCompact1st() const;
5095  void CleanupAfterFree();
5096 };
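
// The ping-pong indexing above in one picture (illustration only): with two
// physical vectors and a single index, swapping the roles of 1st and 2nd
// never copies elements - it is just one bit flip.
//
//     m_1stVectorIndex == 0: 1st = m_Suballocations0, 2nd = m_Suballocations1
//     m_1stVectorIndex == 1: 1st = m_Suballocations1, 2nd = m_Suballocations0
//     swap roles:            m_1stVectorIndex ^= 1;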
5097 
5098 /*
5099 - GetSize() is the original size of allocated memory block.
5100 - m_UsableSize is this size aligned down to a power of two.
5101  All allocations and calculations happen relative to m_UsableSize.
5102 - GetUnusableSize() is the difference between them.
5103  It is reported as a separate, unused range, not available for allocations.
5104 
5105 Node at level 0 has size = m_UsableSize.
5106 Each next level contains nodes with half the size of the previous level.
5107 m_LevelCount is the maximum number of levels to use in the current object.
5108 */
5109 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5110 {
5111  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5112 public:
5113  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5114  virtual ~VmaBlockMetadata_Buddy();
5115  virtual void Init(VkDeviceSize size);
5116 
5117  virtual bool Validate() const;
5118  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5119  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5120  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5121  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5122 
5123  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5124  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5125 
5126 #if VMA_STATS_STRING_ENABLED
5127  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5128 #endif
5129 
5130  virtual bool CreateAllocationRequest(
5131  uint32_t currentFrameIndex,
5132  uint32_t frameInUseCount,
5133  VkDeviceSize bufferImageGranularity,
5134  VkDeviceSize allocSize,
5135  VkDeviceSize allocAlignment,
5136  bool upperAddress,
5137  VmaSuballocationType allocType,
5138  bool canMakeOtherLost,
5139  uint32_t strategy,
5140  VmaAllocationRequest* pAllocationRequest);
5141 
5142  virtual bool MakeRequestedAllocationsLost(
5143  uint32_t currentFrameIndex,
5144  uint32_t frameInUseCount,
5145  VmaAllocationRequest* pAllocationRequest);
5146 
5147  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5148 
5149  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5150 
5151  virtual void Alloc(
5152  const VmaAllocationRequest& request,
5153  VmaSuballocationType type,
5154  VkDeviceSize allocSize,
5155  bool upperAddress,
5156  VmaAllocation hAllocation);
5157 
5158  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5159  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5160 
5161 private:
5162  static const VkDeviceSize MIN_NODE_SIZE = 32;
5163  static const size_t MAX_LEVELS = 30;
5164 
5165  struct ValidationContext
5166  {
5167  size_t calculatedAllocationCount;
5168  size_t calculatedFreeCount;
5169  VkDeviceSize calculatedSumFreeSize;
5170 
5171  ValidationContext() :
5172  calculatedAllocationCount(0),
5173  calculatedFreeCount(0),
5174  calculatedSumFreeSize(0) { }
5175  };
5176 
5177  struct Node
5178  {
5179  VkDeviceSize offset;
5180  enum TYPE
5181  {
5182  TYPE_FREE,
5183  TYPE_ALLOCATION,
5184  TYPE_SPLIT,
5185  TYPE_COUNT
5186  } type;
5187  Node* parent;
5188  Node* buddy;
5189 
5190  union
5191  {
5192  struct
5193  {
5194  Node* prev;
5195  Node* next;
5196  } free;
5197  struct
5198  {
5199  VmaAllocation alloc;
5200  } allocation;
5201  struct
5202  {
5203  Node* leftChild;
5204  } split;
5205  };
5206  };
5207 
5208  // Size of the memory block aligned down to a power of two.
5209  VkDeviceSize m_UsableSize;
5210  uint32_t m_LevelCount;
5211 
5212  Node* m_Root;
5213  struct {
5214  Node* front;
5215  Node* back;
5216  } m_FreeList[MAX_LEVELS];
5217  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5218  size_t m_AllocationCount;
5219  // Number of nodes in the tree with type == TYPE_FREE.
5220  size_t m_FreeCount;
5221  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5222  VkDeviceSize m_SumFreeSize;
5223 
5224  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5225  void DeleteNode(Node* node);
5226  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5227  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5228  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5229  // alloc is passed just for validation and can be null.
5230  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5231  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5232  // Adds node to the front of FreeList at given level.
5233  // node->type must be FREE.
5234  // node->free.prev, next can be undefined.
5235  void AddToFreeListFront(uint32_t level, Node* node);
5236  // Removes node from FreeList at given level.
5237  // node->type must be FREE.
5238  // node->free.prev, next stay untouched.
5239  void RemoveFromFreeList(uint32_t level, Node* node);
5240 
5241 #if VMA_STATS_STRING_ENABLED
5242  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5243 #endif
5244 };
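
// A minimal sketch (hypothetical helper, illustration only) of the level
// arithmetic described above: node size halves at each level
// (LevelToNodeSize(level) == m_UsableSize >> level), so the deepest level
// still able to hold allocSize can be found by walking down:
static inline uint32_t VmaExampleAllocSizeToLevel(
    VkDeviceSize usableSize, uint32_t levelCount, VkDeviceSize allocSize)
{
    uint32_t level = 0;
    // Descend while the next, half-sized node would still fit the allocation.
    while(level + 1 < levelCount && (usableSize >> (level + 1)) >= allocSize)
    {
        ++level;
    }
    return level;
}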
5245 
5246 /*
5247 Represents a single block of device memory (`VkDeviceMemory`) with all the
5248 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5249 
5250 Thread-safety: This class must be externally synchronized.
5251 */
5252 class VmaDeviceMemoryBlock
5253 {
5254  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5255 public:
5256  VmaBlockMetadata* m_pMetadata;
5257 
5258  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5259 
5260  ~VmaDeviceMemoryBlock()
5261  {
5262  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5263  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5264  }
5265 
5266  // Always call after construction.
5267  void Init(
5268  VmaAllocator hAllocator,
5269  uint32_t newMemoryTypeIndex,
5270  VkDeviceMemory newMemory,
5271  VkDeviceSize newSize,
5272  uint32_t id,
5273  uint32_t algorithm);
5274  // Always call before destruction.
5275  void Destroy(VmaAllocator allocator);
5276 
5277  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5278  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5279  uint32_t GetId() const { return m_Id; }
5280  void* GetMappedData() const { return m_pMappedData; }
5281 
5282  // Validates all data structures inside this object. If not valid, returns false.
5283  bool Validate() const;
5284 
5285  VkResult CheckCorruption(VmaAllocator hAllocator);
5286 
5287  // ppData can be null.
5288  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5289  void Unmap(VmaAllocator hAllocator, uint32_t count);
5290 
5291  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5292  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5293 
5294  VkResult BindBufferMemory(
5295  const VmaAllocator hAllocator,
5296  const VmaAllocation hAllocation,
5297  VkBuffer hBuffer);
5298  VkResult BindImageMemory(
5299  const VmaAllocator hAllocator,
5300  const VmaAllocation hAllocation,
5301  VkImage hImage);
5302 
5303 private:
5304  uint32_t m_MemoryTypeIndex;
5305  uint32_t m_Id;
5306  VkDeviceMemory m_hMemory;
5307 
5308  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5309  // Also protects m_MapCount, m_pMappedData.
5310  VMA_MUTEX m_Mutex;
5311  uint32_t m_MapCount;
5312  void* m_pMappedData;
5313 };
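
// A simplified sketch (illustration only) of the reference-counted mapping
// that Map()/Unmap() above implement: vkMapMemory runs only on the 0 -> 1
// transition and vkUnmapMemory only on 1 -> 0. It assumes static Vulkan
// prototypes from vulkan.h; the real code calls through the allocator's
// function pointers and takes the block's mutex around all of this.
struct VmaExampleMappedBlock
{
    VkDevice device;
    VkDeviceMemory memory;
    uint32_t mapCount;
    void* pMappedData;

    VkResult Map(void** ppData)
    {
        if(mapCount == 0)
        {
            // First reference: create one whole-block mapping.
            VkResult res = vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }
        ++mapCount;
        if(ppData != VMA_NULL)
        {
            *ppData = pMappedData; // Every caller sees the same pointer.
        }
        return VK_SUCCESS;
    }
    void Unmap()
    {
        // Assumes calls are balanced, as the destructor assert above enforces.
        if(--mapCount == 0)
        {
            pMappedData = VMA_NULL;
            vkUnmapMemory(device, memory);
        }
    }
};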
5314 
5315 struct VmaPointerLess
5316 {
5317  bool operator()(const void* lhs, const void* rhs) const
5318  {
5319  return lhs < rhs;
5320  }
5321 };
5322 
5323 class VmaDefragmentator;
5324 
5325 /*
5326 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5327 Vulkan memory type.
5328 
5329 Synchronized internally with a mutex.
5330 */
5331 struct VmaBlockVector
5332 {
5333  VMA_CLASS_NO_COPY(VmaBlockVector)
5334 public:
5335  VmaBlockVector(
5336  VmaAllocator hAllocator,
5337  uint32_t memoryTypeIndex,
5338  VkDeviceSize preferredBlockSize,
5339  size_t minBlockCount,
5340  size_t maxBlockCount,
5341  VkDeviceSize bufferImageGranularity,
5342  uint32_t frameInUseCount,
5343  bool isCustomPool,
5344  bool explicitBlockSize,
5345  uint32_t algorithm);
5346  ~VmaBlockVector();
5347 
5348  VkResult CreateMinBlocks();
5349 
5350  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5351  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5352  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5353  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5354  uint32_t GetAlgorithm() const { return m_Algorithm; }
5355 
5356  void GetPoolStats(VmaPoolStats* pStats);
5357 
5358  bool IsEmpty() const { return m_Blocks.empty(); }
5359  bool IsCorruptionDetectionEnabled() const;
5360 
5361  VkResult Allocate(
5362  VmaPool hCurrentPool,
5363  uint32_t currentFrameIndex,
5364  VkDeviceSize size,
5365  VkDeviceSize alignment,
5366  const VmaAllocationCreateInfo& createInfo,
5367  VmaSuballocationType suballocType,
5368  VmaAllocation* pAllocation);
5369 
5370  void Free(
5371  VmaAllocation hAllocation);
5372 
5373  // Adds statistics of this BlockVector to pStats.
5374  void AddStats(VmaStats* pStats);
5375 
5376 #if VMA_STATS_STRING_ENABLED
5377  void PrintDetailedMap(class VmaJsonWriter& json);
5378 #endif
5379 
5380  void MakePoolAllocationsLost(
5381  uint32_t currentFrameIndex,
5382  size_t* pLostAllocationCount);
5383  VkResult CheckCorruption();
5384 
5385  VmaDefragmentator* EnsureDefragmentator(
5386  VmaAllocator hAllocator,
5387  uint32_t currentFrameIndex);
5388 
5389  VkResult Defragment(
5390  VmaDefragmentationStats* pDefragmentationStats,
5391  VkDeviceSize& maxBytesToMove,
5392  uint32_t& maxAllocationsToMove);
5393 
5394  void DestroyDefragmentator();
5395 
5396 private:
5397  friend class VmaDefragmentator;
5398 
5399  const VmaAllocator m_hAllocator;
5400  const uint32_t m_MemoryTypeIndex;
5401  const VkDeviceSize m_PreferredBlockSize;
5402  const size_t m_MinBlockCount;
5403  const size_t m_MaxBlockCount;
5404  const VkDeviceSize m_BufferImageGranularity;
5405  const uint32_t m_FrameInUseCount;
5406  const bool m_IsCustomPool;
5407  const bool m_ExplicitBlockSize;
5408  const uint32_t m_Algorithm;
5409  bool m_HasEmptyBlock;
5410  VMA_MUTEX m_Mutex;
5411  // Incrementally sorted by sumFreeSize, ascending.
5412  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5413  /* There can be at most one block that is completely empty - a
5414  hysteresis to avoid the pessimistic case of alternating creation and destruction
5415  of a VkDeviceMemory. */
5416  VmaDefragmentator* m_pDefragmentator;
5417  uint32_t m_NextBlockId;
5418 
5419  VkDeviceSize CalcMaxBlockSize() const;
5420 
5421  // Finds and removes given block from vector.
5422  void Remove(VmaDeviceMemoryBlock* pBlock);
5423 
5424  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5425  // after this call.
5426  void IncrementallySortBlocks();
5427 
5428  // To be used only without CAN_MAKE_OTHER_LOST flag.
5429  VkResult AllocateFromBlock(
5430  VmaDeviceMemoryBlock* pBlock,
5431  VmaPool hCurrentPool,
5432  uint32_t currentFrameIndex,
5433  VkDeviceSize size,
5434  VkDeviceSize alignment,
5435  VmaAllocationCreateFlags allocFlags,
5436  void* pUserData,
5437  VmaSuballocationType suballocType,
5438  uint32_t strategy,
5439  VmaAllocation* pAllocation);
5440 
5441  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5442 };
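
// A minimal sketch (illustration only) of what one incremental sorting step
// can look like: a single bubble pass that swaps the first adjacent pair of
// blocks out of order by sumFreeSize and stops. The actual
// IncrementallySortBlocks() is defined later in this file.
static void VmaExampleIncrementalSortStep(
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> >& blocks)
{
    for(size_t i = 1; i < blocks.size(); ++i)
    {
        if(blocks[i - 1]->m_pMetadata->GetSumFreeSize() >
            blocks[i]->m_pMetadata->GetSumFreeSize())
        {
            VmaDeviceMemoryBlock* const tmp = blocks[i - 1];
            blocks[i - 1] = blocks[i];
            blocks[i] = tmp;
            return; // One step only - the vector may stay partially sorted.
        }
    }
}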
5443 
5444 struct VmaPool_T
5445 {
5446  VMA_CLASS_NO_COPY(VmaPool_T)
5447 public:
5448  VmaBlockVector m_BlockVector;
5449 
5450  VmaPool_T(
5451  VmaAllocator hAllocator,
5452  const VmaPoolCreateInfo& createInfo,
5453  VkDeviceSize preferredBlockSize);
5454  ~VmaPool_T();
5455 
5456  uint32_t GetId() const { return m_Id; }
5457  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5458 
5459 #if VMA_STATS_STRING_ENABLED
5460  //void PrintDetailedMap(class VmaStringBuilder& sb);
5461 #endif
5462 
5463 private:
5464  uint32_t m_Id;
5465 };
5466 
5467 class VmaDefragmentator
5468 {
5469  VMA_CLASS_NO_COPY(VmaDefragmentator)
5470 private:
5471  const VmaAllocator m_hAllocator;
5472  VmaBlockVector* const m_pBlockVector;
5473  uint32_t m_CurrentFrameIndex;
5474  VkDeviceSize m_BytesMoved;
5475  uint32_t m_AllocationsMoved;
5476 
5477  struct AllocationInfo
5478  {
5479  VmaAllocation m_hAllocation;
5480  VkBool32* m_pChanged;
5481 
5482  AllocationInfo() :
5483  m_hAllocation(VK_NULL_HANDLE),
5484  m_pChanged(VMA_NULL)
5485  {
5486  }
5487  };
5488 
5489  struct AllocationInfoSizeGreater
5490  {
5491  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5492  {
5493  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5494  }
5495  };
5496 
5497  // Used between AddAllocation and Defragment.
5498  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5499 
5500  struct BlockInfo
5501  {
5502  VmaDeviceMemoryBlock* m_pBlock;
5503  bool m_HasNonMovableAllocations;
5504  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5505 
5506  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5507  m_pBlock(VMA_NULL),
5508  m_HasNonMovableAllocations(true),
5509  m_Allocations(pAllocationCallbacks),
5510  m_pMappedDataForDefragmentation(VMA_NULL)
5511  {
5512  }
5513 
5514  void CalcHasNonMovableAllocations()
5515  {
5516  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5517  const size_t defragmentAllocCount = m_Allocations.size();
5518  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5519  }
5520 
5521  void SortAllocationsBySizeDescecnding()
5522  {
5523  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5524  }
5525 
5526  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
5527  void Unmap(VmaAllocator hAllocator);
5528 
5529  private:
5530  // Not null if mapped for defragmentation only, not originally mapped.
5531  void* m_pMappedDataForDefragmentation;
5532  };
5533 
5534  struct BlockPointerLess
5535  {
5536  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5537  {
5538  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5539  }
5540  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5541  {
5542  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5543  }
5544  };
5545 
5546  // 1. Blocks with some non-movable allocations go first.
5547  // 2. Blocks with smaller sumFreeSize go first.
5548  struct BlockInfoCompareMoveDestination
5549  {
5550  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5551  {
5552  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5553  {
5554  return true;
5555  }
5556  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
5557  {
5558  return false;
5559  }
5560  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
5561  {
5562  return true;
5563  }
5564  return false;
5565  }
5566  };
5567 
5568  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
5569  BlockInfoVector m_Blocks;
5570 
5571  VkResult DefragmentRound(
5572  VkDeviceSize maxBytesToMove,
5573  uint32_t maxAllocationsToMove);
5574 
5575  static bool MoveMakesSense(
5576  size_t dstBlockIndex, VkDeviceSize dstOffset,
5577  size_t srcBlockIndex, VkDeviceSize srcOffset);
5578 
5579 public:
5580  VmaDefragmentator(
5581  VmaAllocator hAllocator,
5582  VmaBlockVector* pBlockVector,
5583  uint32_t currentFrameIndex);
5584 
5585  ~VmaDefragmentator();
5586 
5587  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5588  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5589 
5590  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5591 
5592  VkResult Defragment(
5593  VkDeviceSize maxBytesToMove,
5594  uint32_t maxAllocationsToMove);
5595 };
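
// Usage (illustration only): candidate destination blocks are ordered with
// the comparator above - blocks that already contain non-movable allocations
// first, then ascending by free space - e.g. via the same VMA_SORT used for
// AllocationInfoSizeGreater:
//
//     VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());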
5596 
5597 #if VMA_RECORDING_ENABLED
5598 
5599 class VmaRecorder
5600 {
5601 public:
5602  VmaRecorder();
5603  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
5604  void WriteConfiguration(
5605  const VkPhysicalDeviceProperties& devProps,
5606  const VkPhysicalDeviceMemoryProperties& memProps,
5607  bool dedicatedAllocationExtensionEnabled);
5608  ~VmaRecorder();
5609 
5610  void RecordCreateAllocator(uint32_t frameIndex);
5611  void RecordDestroyAllocator(uint32_t frameIndex);
5612  void RecordCreatePool(uint32_t frameIndex,
5613  const VmaPoolCreateInfo& createInfo,
5614  VmaPool pool);
5615  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
5616  void RecordAllocateMemory(uint32_t frameIndex,
5617  const VkMemoryRequirements& vkMemReq,
5618  const VmaAllocationCreateInfo& createInfo,
5619  VmaAllocation allocation);
5620  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
5621  const VkMemoryRequirements& vkMemReq,
5622  bool requiresDedicatedAllocation,
5623  bool prefersDedicatedAllocation,
5624  const VmaAllocationCreateInfo& createInfo,
5625  VmaAllocation allocation);
5626  void RecordAllocateMemoryForImage(uint32_t frameIndex,
5627  const VkMemoryRequirements& vkMemReq,
5628  bool requiresDedicatedAllocation,
5629  bool prefersDedicatedAllocation,
5630  const VmaAllocationCreateInfo& createInfo,
5631  VmaAllocation allocation);
5632  void RecordFreeMemory(uint32_t frameIndex,
5633  VmaAllocation allocation);
5634  void RecordResizeAllocation(
5635  uint32_t frameIndex,
5636  VmaAllocation allocation,
5637  VkDeviceSize newSize);
5638  void RecordSetAllocationUserData(uint32_t frameIndex,
5639  VmaAllocation allocation,
5640  const void* pUserData);
5641  void RecordCreateLostAllocation(uint32_t frameIndex,
5642  VmaAllocation allocation);
5643  void RecordMapMemory(uint32_t frameIndex,
5644  VmaAllocation allocation);
5645  void RecordUnmapMemory(uint32_t frameIndex,
5646  VmaAllocation allocation);
5647  void RecordFlushAllocation(uint32_t frameIndex,
5648  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5649  void RecordInvalidateAllocation(uint32_t frameIndex,
5650  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5651  void RecordCreateBuffer(uint32_t frameIndex,
5652  const VkBufferCreateInfo& bufCreateInfo,
5653  const VmaAllocationCreateInfo& allocCreateInfo,
5654  VmaAllocation allocation);
5655  void RecordCreateImage(uint32_t frameIndex,
5656  const VkImageCreateInfo& imageCreateInfo,
5657  const VmaAllocationCreateInfo& allocCreateInfo,
5658  VmaAllocation allocation);
5659  void RecordDestroyBuffer(uint32_t frameIndex,
5660  VmaAllocation allocation);
5661  void RecordDestroyImage(uint32_t frameIndex,
5662  VmaAllocation allocation);
5663  void RecordTouchAllocation(uint32_t frameIndex,
5664  VmaAllocation allocation);
5665  void RecordGetAllocationInfo(uint32_t frameIndex,
5666  VmaAllocation allocation);
5667  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
5668  VmaPool pool);
5669 
5670 private:
5671  struct CallParams
5672  {
5673  uint32_t threadId;
5674  double time;
5675  };
5676 
5677  class UserDataString
5678  {
5679  public:
5680  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
5681  const char* GetString() const { return m_Str; }
5682 
5683  private:
5684  char m_PtrStr[17];
5685  const char* m_Str;
5686  };
5687 
5688  bool m_UseMutex;
5689  VmaRecordFlags m_Flags;
5690  FILE* m_File;
5691  VMA_MUTEX m_FileMutex;
5692  int64_t m_Freq;
5693  int64_t m_StartCounter;
5694 
5695  void GetBasicParams(CallParams& outParams);
5696  void Flush();
5697 };
5698 
5699 #endif // #if VMA_RECORDING_ENABLED
5700 
5701 // Main allocator object.
5702 struct VmaAllocator_T
5703 {
5704  VMA_CLASS_NO_COPY(VmaAllocator_T)
5705 public:
5706  bool m_UseMutex;
5707  bool m_UseKhrDedicatedAllocation;
5708  VkDevice m_hDevice;
5709  bool m_AllocationCallbacksSpecified;
5710  VkAllocationCallbacks m_AllocationCallbacks;
5711  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5712 
5713  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
5714  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5715  VMA_MUTEX m_HeapSizeLimitMutex;
5716 
5717  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5718  VkPhysicalDeviceMemoryProperties m_MemProps;
5719 
5720  // Default pools.
5721  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5722 
5723  // Each vector is sorted by memory (handle value).
5724  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5725  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5726  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5727 
5728  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5729  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5730  ~VmaAllocator_T();
5731 
5732  const VkAllocationCallbacks* GetAllocationCallbacks() const
5733  {
5734  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5735  }
5736  const VmaVulkanFunctions& GetVulkanFunctions() const
5737  {
5738  return m_VulkanFunctions;
5739  }
5740 
5741  VkDeviceSize GetBufferImageGranularity() const
5742  {
5743  return VMA_MAX(
5744  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5745  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5746  }
5747 
5748  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5749  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5750 
5751  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5752  {
5753  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5754  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5755  }
5756  // True when a specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5757  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5758  {
5759  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5760  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5761  }
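
    // Worked example (illustration only): for propertyFlags = HOST_VISIBLE |
    // HOST_CACHED, masking with (HOST_VISIBLE | HOST_COHERENT) leaves exactly
    // HOST_VISIBLE, so the type is non-coherent. For HOST_VISIBLE |
    // HOST_COHERENT the masked value still contains HOST_COHERENT, so the
    // equality fails and the type is treated as coherent.
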
5762  // Minimum alignment for all allocations in a specific memory type.
5763  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5764  {
5765  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5766  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5767  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5768  }
5769 
5770  bool IsIntegratedGpu() const
5771  {
5772  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5773  }
5774 
5775 #if VMA_RECORDING_ENABLED
5776  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5777 #endif
5778 
5779  void GetBufferMemoryRequirements(
5780  VkBuffer hBuffer,
5781  VkMemoryRequirements& memReq,
5782  bool& requiresDedicatedAllocation,
5783  bool& prefersDedicatedAllocation) const;
5784  void GetImageMemoryRequirements(
5785  VkImage hImage,
5786  VkMemoryRequirements& memReq,
5787  bool& requiresDedicatedAllocation,
5788  bool& prefersDedicatedAllocation) const;
5789 
5790  // Main allocation function.
5791  VkResult AllocateMemory(
5792  const VkMemoryRequirements& vkMemReq,
5793  bool requiresDedicatedAllocation,
5794  bool prefersDedicatedAllocation,
5795  VkBuffer dedicatedBuffer,
5796  VkImage dedicatedImage,
5797  const VmaAllocationCreateInfo& createInfo,
5798  VmaSuballocationType suballocType,
5799  VmaAllocation* pAllocation);
5800 
5801  // Main deallocation function.
5802  void FreeMemory(const VmaAllocation allocation);
5803 
5804  VkResult ResizeAllocation(
5805  const VmaAllocation alloc,
5806  VkDeviceSize newSize);
5807 
5808  void CalculateStats(VmaStats* pStats);
5809 
5810 #if VMA_STATS_STRING_ENABLED
5811  void PrintDetailedMap(class VmaJsonWriter& json);
5812 #endif
5813 
5814  VkResult Defragment(
5815  VmaAllocation* pAllocations,
5816  size_t allocationCount,
5817  VkBool32* pAllocationsChanged,
5818  const VmaDefragmentationInfo* pDefragmentationInfo,
5819  VmaDefragmentationStats* pDefragmentationStats);
5820 
5821  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5822  bool TouchAllocation(VmaAllocation hAllocation);
5823 
5824  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5825  void DestroyPool(VmaPool pool);
5826  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5827 
5828  void SetCurrentFrameIndex(uint32_t frameIndex);
5829  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5830 
5831  void MakePoolAllocationsLost(
5832  VmaPool hPool,
5833  size_t* pLostAllocationCount);
5834  VkResult CheckPoolCorruption(VmaPool hPool);
5835  VkResult CheckCorruption(uint32_t memoryTypeBits);
5836 
5837  void CreateLostAllocation(VmaAllocation* pAllocation);
5838 
5839  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5840  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5841 
5842  VkResult Map(VmaAllocation hAllocation, void** ppData);
5843  void Unmap(VmaAllocation hAllocation);
5844 
5845  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5846  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5847 
5848  void FlushOrInvalidateAllocation(
5849  VmaAllocation hAllocation,
5850  VkDeviceSize offset, VkDeviceSize size,
5851  VMA_CACHE_OPERATION op);
5852 
5853  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5854 
5855 private:
5856  VkDeviceSize m_PreferredLargeHeapBlockSize;
5857 
5858  VkPhysicalDevice m_PhysicalDevice;
5859  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5860 
5861  VMA_MUTEX m_PoolsMutex;
5862  // Protected by m_PoolsMutex. Sorted by pointer value.
5863  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5864  uint32_t m_NextPoolId;
5865 
5866  VmaVulkanFunctions m_VulkanFunctions;
5867 
5868 #if VMA_RECORDING_ENABLED
5869  VmaRecorder* m_pRecorder;
5870 #endif
5871 
5872  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5873 
5874  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5875 
5876  VkResult AllocateMemoryOfType(
5877  VkDeviceSize size,
5878  VkDeviceSize alignment,
5879  bool dedicatedAllocation,
5880  VkBuffer dedicatedBuffer,
5881  VkImage dedicatedImage,
5882  const VmaAllocationCreateInfo& createInfo,
5883  uint32_t memTypeIndex,
5884  VmaSuballocationType suballocType,
5885  VmaAllocation* pAllocation);
5886 
5887  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
5888  VkResult AllocateDedicatedMemory(
5889  VkDeviceSize size,
5890  VmaSuballocationType suballocType,
5891  uint32_t memTypeIndex,
5892  bool map,
5893  bool isUserDataString,
5894  void* pUserData,
5895  VkBuffer dedicatedBuffer,
5896  VkImage dedicatedImage,
5897  VmaAllocation* pAllocation);
5898 
5899  // Frees the given allocation as dedicated memory; the allocation must have been created as dedicated.
5900  void FreeDedicatedMemory(VmaAllocation allocation);
5901 };
5902 
5904 // Memory allocation #2 after VmaAllocator_T definition
5905 
5906 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5907 {
5908  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5909 }
5910 
5911 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5912 {
5913  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5914 }
5915 
5916 template<typename T>
5917 static T* VmaAllocate(VmaAllocator hAllocator)
5918 {
5919  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5920 }
5921 
5922 template<typename T>
5923 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5924 {
5925  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5926 }
5927 
5928 template<typename T>
5929 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5930 {
5931  if(ptr != VMA_NULL)
5932  {
5933  ptr->~T();
5934  VmaFree(hAllocator, ptr);
5935  }
5936 }
5937 
5938 template<typename T>
5939 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5940 {
5941  if(ptr != VMA_NULL)
5942  {
5943  for(size_t i = count; i--; )
5944  ptr[i].~T();
5945  VmaFree(hAllocator, ptr);
5946  }
5947 }
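
// A minimal usage sketch (illustration only): VmaAllocate() returns raw,
// uninitialized memory, so the object must be constructed with placement new
// (assumes <new> is available); vma_delete() then runs the destructor and
// frees the memory.
template<typename T>
static T* VmaExampleCreate(VmaAllocator hAllocator)
{
    T* const p = VmaAllocate<T>(hAllocator);
    return new(p) T(); // Construct in place in the raw memory.
}
// Usage: T* obj = VmaExampleCreate<T>(hAllocator); ...; vma_delete(hAllocator, obj);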
5948 
5950 // VmaStringBuilder
5951 
5952 #if VMA_STATS_STRING_ENABLED
5953 
5954 class VmaStringBuilder
5955 {
5956 public:
5957  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
5958  size_t GetLength() const { return m_Data.size(); }
5959  const char* GetData() const { return m_Data.data(); }
5960 
5961  void Add(char ch) { m_Data.push_back(ch); }
5962  void Add(const char* pStr);
5963  void AddNewLine() { Add('\n'); }
5964  void AddNumber(uint32_t num);
5965  void AddNumber(uint64_t num);
5966  void AddPointer(const void* ptr);
5967 
5968 private:
5969  VmaVector< char, VmaStlAllocator<char> > m_Data;
5970 };
5971 
5972 void VmaStringBuilder::Add(const char* pStr)
5973 {
5974  const size_t strLen = strlen(pStr);
5975  if(strLen > 0)
5976  {
5977  const size_t oldCount = m_Data.size();
5978  m_Data.resize(oldCount + strLen);
5979  memcpy(m_Data.data() + oldCount, pStr, strLen);
5980  }
5981 }
5982 
5983 void VmaStringBuilder::AddNumber(uint32_t num)
5984 {
5985  char buf[11];
5986  VmaUint32ToStr(buf, sizeof(buf), num);
5987  Add(buf);
5988 }
5989 
5990 void VmaStringBuilder::AddNumber(uint64_t num)
5991 {
5992  char buf[21];
5993  VmaUint64ToStr(buf, sizeof(buf), num);
5994  Add(buf);
5995 }
5996 
5997 void VmaStringBuilder::AddPointer(const void* ptr)
5998 {
5999  char buf[21];
6000  VmaPtrToStr(buf, sizeof(buf), ptr);
6001  Add(buf);
6002 }
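
// A short usage sketch (illustration only). The builder's buffer is not
// NUL-terminated, so consumers must pair GetData() with GetLength();
// printing here assumes <cstdio> is available.
static void VmaExampleStringBuilderUsage(VmaAllocator hAllocator)
{
    VmaStringBuilder sb(hAllocator);
    sb.Add("Block count: ");
    sb.AddNumber(3u);
    sb.AddNewLine();
    printf("%.*s", (int)sb.GetLength(), sb.GetData());
}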
6003 
6004 #endif // #if VMA_STATS_STRING_ENABLED
6005 
6007 // VmaJsonWriter
6008 
6009 #if VMA_STATS_STRING_ENABLED
6010 
6011 class VmaJsonWriter
6012 {
6013  VMA_CLASS_NO_COPY(VmaJsonWriter)
6014 public:
6015  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6016  ~VmaJsonWriter();
6017 
6018  void BeginObject(bool singleLine = false);
6019  void EndObject();
6020 
6021  void BeginArray(bool singleLine = false);
6022  void EndArray();
6023 
6024  void WriteString(const char* pStr);
6025  void BeginString(const char* pStr = VMA_NULL);
6026  void ContinueString(const char* pStr);
6027  void ContinueString(uint32_t n);
6028  void ContinueString(uint64_t n);
6029  void ContinueString_Pointer(const void* ptr);
6030  void EndString(const char* pStr = VMA_NULL);
6031 
6032  void WriteNumber(uint32_t n);
6033  void WriteNumber(uint64_t n);
6034  void WriteBool(bool b);
6035  void WriteNull();
6036 
6037 private:
6038  static const char* const INDENT;
6039 
6040  enum COLLECTION_TYPE
6041  {
6042  COLLECTION_TYPE_OBJECT,
6043  COLLECTION_TYPE_ARRAY,
6044  };
6045  struct StackItem
6046  {
6047  COLLECTION_TYPE type;
6048  uint32_t valueCount;
6049  bool singleLineMode;
6050  };
6051 
6052  VmaStringBuilder& m_SB;
6053  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6054  bool m_InsideString;
6055 
6056  void BeginValue(bool isString);
6057  void WriteIndent(bool oneLess = false);
6058 };
6059 
6060 const char* const VmaJsonWriter::INDENT = " ";
6061 
6062 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6063  m_SB(sb),
6064  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6065  m_InsideString(false)
6066 {
6067 }
6068 
6069 VmaJsonWriter::~VmaJsonWriter()
6070 {
6071  VMA_ASSERT(!m_InsideString);
6072  VMA_ASSERT(m_Stack.empty());
6073 }
6074 
6075 void VmaJsonWriter::BeginObject(bool singleLine)
6076 {
6077  VMA_ASSERT(!m_InsideString);
6078 
6079  BeginValue(false);
6080  m_SB.Add('{');
6081 
6082  StackItem item;
6083  item.type = COLLECTION_TYPE_OBJECT;
6084  item.valueCount = 0;
6085  item.singleLineMode = singleLine;
6086  m_Stack.push_back(item);
6087 }
6088 
6089 void VmaJsonWriter::EndObject()
6090 {
6091  VMA_ASSERT(!m_InsideString);
6092 
6093  WriteIndent(true);
6094  m_SB.Add('}');
6095 
6096  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6097  m_Stack.pop_back();
6098 }
6099 
6100 void VmaJsonWriter::BeginArray(bool singleLine)
6101 {
6102  VMA_ASSERT(!m_InsideString);
6103 
6104  BeginValue(false);
6105  m_SB.Add('[');
6106 
6107  StackItem item;
6108  item.type = COLLECTION_TYPE_ARRAY;
6109  item.valueCount = 0;
6110  item.singleLineMode = singleLine;
6111  m_Stack.push_back(item);
6112 }
6113 
6114 void VmaJsonWriter::EndArray()
6115 {
6116  VMA_ASSERT(!m_InsideString);
6117 
6118  WriteIndent(true);
6119  m_SB.Add(']');
6120 
6121  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6122  m_Stack.pop_back();
6123 }
6124 
6125 void VmaJsonWriter::WriteString(const char* pStr)
6126 {
6127  BeginString(pStr);
6128  EndString();
6129 }
6130 
6131 void VmaJsonWriter::BeginString(const char* pStr)
6132 {
6133  VMA_ASSERT(!m_InsideString);
6134 
6135  BeginValue(true);
6136  m_SB.Add('"');
6137  m_InsideString = true;
6138  if(pStr != VMA_NULL && pStr[0] != '\0')
6139  {
6140  ContinueString(pStr);
6141  }
6142 }
6143 
6144 void VmaJsonWriter::ContinueString(const char* pStr)
6145 {
6146  VMA_ASSERT(m_InsideString);
6147 
6148  const size_t strLen = strlen(pStr);
6149  for(size_t i = 0; i < strLen; ++i)
6150  {
6151  char ch = pStr[i];
6152  if(ch == '\\')
6153  {
6154  m_SB.Add("\\\\");
6155  }
6156  else if(ch == '"')
6157  {
6158  m_SB.Add("\\\"");
6159  }
6160  else if(ch >= 32)
6161  {
6162  m_SB.Add(ch);
6163  }
6164  else switch(ch)
6165  {
6166  case '\b':
6167  m_SB.Add("\\b");
6168  break;
6169  case '\f':
6170  m_SB.Add("\\f");
6171  break;
6172  case '\n':
6173  m_SB.Add("\\n");
6174  break;
6175  case '\r':
6176  m_SB.Add("\\r");
6177  break;
6178  case '\t':
6179  m_SB.Add("\\t");
6180  break;
6181  default:
6182  VMA_ASSERT(0 && "Character not currently supported.");
6183  break;
6184  }
6185  }
6186 }
6187 
6188 void VmaJsonWriter::ContinueString(uint32_t n)
6189 {
6190  VMA_ASSERT(m_InsideString);
6191  m_SB.AddNumber(n);
6192 }
6193 
6194 void VmaJsonWriter::ContinueString(uint64_t n)
6195 {
6196  VMA_ASSERT(m_InsideString);
6197  m_SB.AddNumber(n);
6198 }
6199 
6200 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6201 {
6202  VMA_ASSERT(m_InsideString);
6203  m_SB.AddPointer(ptr);
6204 }
6205 
6206 void VmaJsonWriter::EndString(const char* pStr)
6207 {
6208  VMA_ASSERT(m_InsideString);
6209  if(pStr != VMA_NULL && pStr[0] != '\0')
6210  {
6211  ContinueString(pStr);
6212  }
6213  m_SB.Add('"');
6214  m_InsideString = false;
6215 }
6216 
6217 void VmaJsonWriter::WriteNumber(uint32_t n)
6218 {
6219  VMA_ASSERT(!m_InsideString);
6220  BeginValue(false);
6221  m_SB.AddNumber(n);
6222 }
6223 
6224 void VmaJsonWriter::WriteNumber(uint64_t n)
6225 {
6226  VMA_ASSERT(!m_InsideString);
6227  BeginValue(false);
6228  m_SB.AddNumber(n);
6229 }
6230 
6231 void VmaJsonWriter::WriteBool(bool b)
6232 {
6233  VMA_ASSERT(!m_InsideString);
6234  BeginValue(false);
6235  m_SB.Add(b ? "true" : "false");
6236 }
6237 
6238 void VmaJsonWriter::WriteNull()
6239 {
6240  VMA_ASSERT(!m_InsideString);
6241  BeginValue(false);
6242  m_SB.Add("null");
6243 }
6244 
6245 void VmaJsonWriter::BeginValue(bool isString)
6246 {
6247  if(!m_Stack.empty())
6248  {
6249  StackItem& currItem = m_Stack.back();
6250  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6251  currItem.valueCount % 2 == 0)
6252  {
6253  VMA_ASSERT(isString);
6254  }
6255 
6256  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6257  currItem.valueCount % 2 != 0)
6258  {
6259  m_SB.Add(": ");
6260  }
6261  else if(currItem.valueCount > 0)
6262  {
6263  m_SB.Add(", ");
6264  WriteIndent();
6265  }
6266  else
6267  {
6268  WriteIndent();
6269  }
6270  ++currItem.valueCount;
6271  }
6272 }
6273 
6274 void VmaJsonWriter::WriteIndent(bool oneLess)
6275 {
6276  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6277  {
6278  m_SB.AddNewLine();
6279 
6280  size_t count = m_Stack.size();
6281  if(count > 0 && oneLess)
6282  {
6283  --count;
6284  }
6285  for(size_t i = 0; i < count; ++i)
6286  {
6287  m_SB.Add(INDENT);
6288  }
6289  }
6290 }
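// Illustrative sketch (not part of the library API): emitting a small JSON
// document. Inside an object, keys and values alternate; the valueCount % 2
// logic in BeginValue() asserts that every even-positioned value is a string
// key. Assumes `allocator` is a valid VmaAllocator and `pCallbacks` its
// allocation callbacks.
//
//   VmaStringBuilder sb(allocator);
//   {
//       VmaJsonWriter json(pCallbacks, sb);
//       json.BeginObject();
//       json.WriteString("Count");  // key
//       json.WriteNumber(3u);       // value
//       json.WriteString("Valid");  // key
//       json.WriteBool(true);       // value
//       json.EndObject();
//   } // ~VmaJsonWriter() asserts the object/array stack is empty.
//
// Result (indented with INDENT per nesting level):
//   {
//     "Count": 3,
//     "Valid": true
//   }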
6291 
6292 #endif // #if VMA_STATS_STRING_ENABLED
6293 
6294 ////////////////////////////////////////////////////////////////////////////////
6295 
6296 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6297 {
6298  if(IsUserDataString())
6299  {
6300  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6301 
6302  FreeUserDataString(hAllocator);
6303 
6304  if(pUserData != VMA_NULL)
6305  {
6306  const char* const newStrSrc = (char*)pUserData;
6307  const size_t newStrLen = strlen(newStrSrc);
6308  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6309  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6310  m_pUserData = newStrDst;
6311  }
6312  }
6313  else
6314  {
6315  m_pUserData = pUserData;
6316  }
6317 }
6318 
6319 void VmaAllocation_T::ChangeBlockAllocation(
6320  VmaAllocator hAllocator,
6321  VmaDeviceMemoryBlock* block,
6322  VkDeviceSize offset)
6323 {
6324  VMA_ASSERT(block != VMA_NULL);
6325  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6326 
6327  // Move mapping reference counter from old block to new block.
6328  if(block != m_BlockAllocation.m_Block)
6329  {
6330  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6331  if(IsPersistentMap())
6332  ++mapRefCount;
6333  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6334  block->Map(hAllocator, mapRefCount, VMA_NULL);
6335  }
6336 
6337  m_BlockAllocation.m_Block = block;
6338  m_BlockAllocation.m_Offset = offset;
6339 }
6340 
6341 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
6342 {
6343  VMA_ASSERT(newSize > 0);
6344  m_Size = newSize;
6345 }
6346 
6347 VkDeviceSize VmaAllocation_T::GetOffset() const
6348 {
6349  switch(m_Type)
6350  {
6351  case ALLOCATION_TYPE_BLOCK:
6352  return m_BlockAllocation.m_Offset;
6353  case ALLOCATION_TYPE_DEDICATED:
6354  return 0;
6355  default:
6356  VMA_ASSERT(0);
6357  return 0;
6358  }
6359 }
6360 
6361 VkDeviceMemory VmaAllocation_T::GetMemory() const
6362 {
6363  switch(m_Type)
6364  {
6365  case ALLOCATION_TYPE_BLOCK:
6366  return m_BlockAllocation.m_Block->GetDeviceMemory();
6367  case ALLOCATION_TYPE_DEDICATED:
6368  return m_DedicatedAllocation.m_hMemory;
6369  default:
6370  VMA_ASSERT(0);
6371  return VK_NULL_HANDLE;
6372  }
6373 }
6374 
6375 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6376 {
6377  switch(m_Type)
6378  {
6379  case ALLOCATION_TYPE_BLOCK:
6380  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6381  case ALLOCATION_TYPE_DEDICATED:
6382  return m_DedicatedAllocation.m_MemoryTypeIndex;
6383  default:
6384  VMA_ASSERT(0);
6385  return UINT32_MAX;
6386  }
6387 }
6388 
6389 void* VmaAllocation_T::GetMappedData() const
6390 {
6391  switch(m_Type)
6392  {
6393  case ALLOCATION_TYPE_BLOCK:
6394  if(m_MapCount != 0)
6395  {
6396  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6397  VMA_ASSERT(pBlockData != VMA_NULL);
6398  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6399  }
6400  else
6401  {
6402  return VMA_NULL;
6403  }
6404  break;
6405  case ALLOCATION_TYPE_DEDICATED:
6406  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6407  return m_DedicatedAllocation.m_pMappedData;
6408  default:
6409  VMA_ASSERT(0);
6410  return VMA_NULL;
6411  }
6412 }
6413 
6414 bool VmaAllocation_T::CanBecomeLost() const
6415 {
6416  switch(m_Type)
6417  {
6418  case ALLOCATION_TYPE_BLOCK:
6419  return m_BlockAllocation.m_CanBecomeLost;
6420  case ALLOCATION_TYPE_DEDICATED:
6421  return false;
6422  default:
6423  VMA_ASSERT(0);
6424  return false;
6425  }
6426 }
6427 
6428 VmaPool VmaAllocation_T::GetPool() const
6429 {
6430  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6431  return m_BlockAllocation.m_hPool;
6432 }
6433 
6434 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6435 {
6436  VMA_ASSERT(CanBecomeLost());
6437 
6438  /*
6439  Warning: This is a carefully designed algorithm.
6440  Do not modify unless you really know what you're doing :)
6441  */
6442  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
6443  for(;;)
6444  {
6445  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6446  {
6447  VMA_ASSERT(0);
6448  return false;
6449  }
6450  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6451  {
6452  return false;
6453  }
6454  else // Last use time earlier than current time.
6455  {
6456  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6457  {
6458  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6459  // Calling code just needs to unregister this allocation in the owning VmaDeviceMemoryBlock.
6460  return true;
6461  }
6462  }
6463  }
6464 }
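// The loop above is a standard compare-exchange retry: take a snapshot of the
// atomic last-use frame index, decide based on the snapshot, then try to
// publish VMA_FRAME_INDEX_LOST only if no other thread changed the value in
// the meantime. A hedged sketch of one iteration, assuming
// CompareExchangeLastUseFrameIndex() follows compare_exchange semantics and
// refreshes its first argument on failure:
//
//   uint32_t expected = GetLastUseFrameIndex();             // snapshot
//   if(expected + frameInUseCount < currentFrameIndex &&    // old enough?
//      CompareExchangeLastUseFrameIndex(expected, VMA_FRAME_INDEX_LOST))
//   {
//       return true; // we won the race; the allocation is now lost
//   }
//   // On CAS failure `expected` holds the fresh value - loop and re-check.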
6465 
6466 #if VMA_STATS_STRING_ENABLED
6467 
6468 // Names correspond to the values of enum VmaSuballocationType.
6469 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6470  "FREE",
6471  "UNKNOWN",
6472  "BUFFER",
6473  "IMAGE_UNKNOWN",
6474  "IMAGE_LINEAR",
6475  "IMAGE_OPTIMAL",
6476 };
6477 
6478 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6479 {
6480  json.WriteString("Type");
6481  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6482 
6483  json.WriteString("Size");
6484  json.WriteNumber(m_Size);
6485 
6486  if(m_pUserData != VMA_NULL)
6487  {
6488  json.WriteString("UserData");
6489  if(IsUserDataString())
6490  {
6491  json.WriteString((const char*)m_pUserData);
6492  }
6493  else
6494  {
6495  json.BeginString();
6496  json.ContinueString_Pointer(m_pUserData);
6497  json.EndString();
6498  }
6499  }
6500 
6501  json.WriteString("CreationFrameIndex");
6502  json.WriteNumber(m_CreationFrameIndex);
6503 
6504  json.WriteString("LastUseFrameIndex");
6505  json.WriteNumber(GetLastUseFrameIndex());
6506 
6507  if(m_BufferImageUsage != 0)
6508  {
6509  json.WriteString("Usage");
6510  json.WriteNumber(m_BufferImageUsage);
6511  }
6512 }
6513 
6514 #endif
6515 
6516 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6517 {
6518  VMA_ASSERT(IsUserDataString());
6519  if(m_pUserData != VMA_NULL)
6520  {
6521  char* const oldStr = (char*)m_pUserData;
6522  const size_t oldStrLen = strlen(oldStr);
6523  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6524  m_pUserData = VMA_NULL;
6525  }
6526 }
6527 
6528 void VmaAllocation_T::BlockAllocMap()
6529 {
6530  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6531 
6532  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6533  {
6534  ++m_MapCount;
6535  }
6536  else
6537  {
6538  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6539  }
6540 }
6541 
6542 void VmaAllocation_T::BlockAllocUnmap()
6543 {
6544  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6545 
6546  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6547  {
6548  --m_MapCount;
6549  }
6550  else
6551  {
6552  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6553  }
6554 }
6555 
6556 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6557 {
6558  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6559 
6560  if(m_MapCount != 0)
6561  {
6562  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6563  {
6564  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6565  *ppData = m_DedicatedAllocation.m_pMappedData;
6566  ++m_MapCount;
6567  return VK_SUCCESS;
6568  }
6569  else
6570  {
6571  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6572  return VK_ERROR_MEMORY_MAP_FAILED;
6573  }
6574  }
6575  else
6576  {
6577  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6578  hAllocator->m_hDevice,
6579  m_DedicatedAllocation.m_hMemory,
6580  0, // offset
6581  VK_WHOLE_SIZE,
6582  0, // flags
6583  ppData);
6584  if(result == VK_SUCCESS)
6585  {
6586  m_DedicatedAllocation.m_pMappedData = *ppData;
6587  m_MapCount = 1;
6588  }
6589  return result;
6590  }
6591 }
6592 
6593 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6594 {
6595  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6596 
6597  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6598  {
6599  --m_MapCount;
6600  if(m_MapCount == 0)
6601  {
6602  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6603  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6604  hAllocator->m_hDevice,
6605  m_DedicatedAllocation.m_hMemory);
6606  }
6607  }
6608  else
6609  {
6610  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6611  }
6612 }
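// Map reference counting, by example: the first DedicatedAllocMap() call maps
// the memory with vkMapMemory and sets m_MapCount = 1; subsequent calls only
// increment the counter (capped at 0x7F, the bits below
// MAP_COUNT_FLAG_PERSISTENT_MAP) and return the cached pointer. Each
// DedicatedAllocUnmap() decrements, and only the call that brings the counter
// back to 0 actually invokes vkUnmapMemory.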
6613 
6614 #if VMA_STATS_STRING_ENABLED
6615 
6616 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6617 {
6618  json.BeginObject();
6619 
6620  json.WriteString("Blocks");
6621  json.WriteNumber(stat.blockCount);
6622 
6623  json.WriteString("Allocations");
6624  json.WriteNumber(stat.allocationCount);
6625 
6626  json.WriteString("UnusedRanges");
6627  json.WriteNumber(stat.unusedRangeCount);
6628 
6629  json.WriteString("UsedBytes");
6630  json.WriteNumber(stat.usedBytes);
6631 
6632  json.WriteString("UnusedBytes");
6633  json.WriteNumber(stat.unusedBytes);
6634 
6635  if(stat.allocationCount > 1)
6636  {
6637  json.WriteString("AllocationSize");
6638  json.BeginObject(true);
6639  json.WriteString("Min");
6640  json.WriteNumber(stat.allocationSizeMin);
6641  json.WriteString("Avg");
6642  json.WriteNumber(stat.allocationSizeAvg);
6643  json.WriteString("Max");
6644  json.WriteNumber(stat.allocationSizeMax);
6645  json.EndObject();
6646  }
6647 
6648  if(stat.unusedRangeCount > 1)
6649  {
6650  json.WriteString("UnusedRangeSize");
6651  json.BeginObject(true);
6652  json.WriteString("Min");
6653  json.WriteNumber(stat.unusedRangeSizeMin);
6654  json.WriteString("Avg");
6655  json.WriteNumber(stat.unusedRangeSizeAvg);
6656  json.WriteString("Max");
6657  json.WriteNumber(stat.unusedRangeSizeMax);
6658  json.EndObject();
6659  }
6660 
6661  json.EndObject();
6662 }
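// For reference, a sketch of the JSON produced above (values are made up):
//
//   {
//     "Blocks": 1,
//     "Allocations": 2,
//     "UnusedRanges": 2,
//     "UsedBytes": 1048576,
//     "UnusedBytes": 131072,
//     "AllocationSize": { "Min": 256, "Avg": 524288, "Max": 1048320 },
//     "UnusedRangeSize": { "Min": 65536, "Avg": 65536, "Max": 65536 }
//   }
//
// The Min/Avg/Max sub-objects are emitted only when there is more than one
// element, since aggregates of a single value add no information.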
6663 
6664 #endif // #if VMA_STATS_STRING_ENABLED
6665 
6666 struct VmaSuballocationItemSizeLess
6667 {
6668  bool operator()(
6669  const VmaSuballocationList::iterator lhs,
6670  const VmaSuballocationList::iterator rhs) const
6671  {
6672  return lhs->size < rhs->size;
6673  }
6674  bool operator()(
6675  const VmaSuballocationList::iterator lhs,
6676  VkDeviceSize rhsSize) const
6677  {
6678  return lhs->size < rhsSize;
6679  }
6680 };
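// The second operator() overload is what lets VmaBinaryFindFirstNotLess
// compare stored list iterators directly against a plain VkDeviceSize key, so
// a best-fit lookup needs no temporary suballocation. Illustrative sketch,
// assuming `bySize` is a vector kept sorted with this comparator:
//
//   VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//       bySize.data(), bySize.data() + bySize.size(),
//       requiredSize,                       // plain VkDeviceSize key
//       VmaSuballocationItemSizeLess());
//   // *it is now the smallest registered free range with size >= requiredSize.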
6681 
6682 
6683 ////////////////////////////////////////////////////////////////////////////////
6684 // class VmaBlockMetadata
6685 
6686 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
6687  m_Size(0),
6688  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
6689 {
6690 }
6691 
6692 #if VMA_STATS_STRING_ENABLED
6693 
6694 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6695  VkDeviceSize unusedBytes,
6696  size_t allocationCount,
6697  size_t unusedRangeCount) const
6698 {
6699  json.BeginObject();
6700 
6701  json.WriteString("TotalBytes");
6702  json.WriteNumber(GetSize());
6703 
6704  json.WriteString("UnusedBytes");
6705  json.WriteNumber(unusedBytes);
6706 
6707  json.WriteString("Allocations");
6708  json.WriteNumber((uint64_t)allocationCount);
6709 
6710  json.WriteString("UnusedRanges");
6711  json.WriteNumber((uint64_t)unusedRangeCount);
6712 
6713  json.WriteString("Suballocations");
6714  json.BeginArray();
6715 }
6716 
6717 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6718  VkDeviceSize offset,
6719  VmaAllocation hAllocation) const
6720 {
6721  json.BeginObject(true);
6722 
6723  json.WriteString("Offset");
6724  json.WriteNumber(offset);
6725 
6726  hAllocation->PrintParameters(json);
6727 
6728  json.EndObject();
6729 }
6730 
6731 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6732  VkDeviceSize offset,
6733  VkDeviceSize size) const
6734 {
6735  json.BeginObject(true);
6736 
6737  json.WriteString("Offset");
6738  json.WriteNumber(offset);
6739 
6740  json.WriteString("Type");
6741  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6742 
6743  json.WriteString("Size");
6744  json.WriteNumber(size);
6745 
6746  json.EndObject();
6747 }
6748 
6749 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6750 {
6751  json.EndArray();
6752  json.EndObject();
6753 }
6754 
6755 #endif // #if VMA_STATS_STRING_ENABLED
6756 
6757 ////////////////////////////////////////////////////////////////////////////////
6758 // class VmaBlockMetadata_Generic
6759 
6760 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6761  VmaBlockMetadata(hAllocator),
6762  m_FreeCount(0),
6763  m_SumFreeSize(0),
6764  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6765  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6766 {
6767 }
6768 
6769 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6770 {
6771 }
6772 
6773 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6774 {
6775  VmaBlockMetadata::Init(size);
6776 
6777  m_FreeCount = 1;
6778  m_SumFreeSize = size;
6779 
6780  VmaSuballocation suballoc = {};
6781  suballoc.offset = 0;
6782  suballoc.size = size;
6783  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6784  suballoc.hAllocation = VK_NULL_HANDLE;
6785 
6786  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6787  m_Suballocations.push_back(suballoc);
6788  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6789  --suballocItem;
6790  m_FreeSuballocationsBySize.push_back(suballocItem);
6791 }
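// After Init(size) the metadata describes one free range spanning the whole
// block. For example, for a 64 MiB block:
//
//   m_Suballocations           = [ { offset 0, size 67108864, FREE, null } ]
//   m_FreeSuballocationsBySize = [ iterator to that single range ]
//   m_FreeCount = 1, m_SumFreeSize = 67108864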
6792 
6793 bool VmaBlockMetadata_Generic::Validate() const
6794 {
6795  VMA_VALIDATE(!m_Suballocations.empty());
6796 
6797  // Expected offset of new suballocation as calculated from previous ones.
6798  VkDeviceSize calculatedOffset = 0;
6799  // Expected number of free suballocations as calculated from traversing their list.
6800  uint32_t calculatedFreeCount = 0;
6801  // Expected sum size of free suballocations as calculated from traversing their list.
6802  VkDeviceSize calculatedSumFreeSize = 0;
6803  // Expected number of free suballocations that should be registered in
6804  // m_FreeSuballocationsBySize calculated from traversing their list.
6805  size_t freeSuballocationsToRegister = 0;
6806  // True if previous visited suballocation was free.
6807  bool prevFree = false;
6808 
6809  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6810  suballocItem != m_Suballocations.cend();
6811  ++suballocItem)
6812  {
6813  const VmaSuballocation& subAlloc = *suballocItem;
6814 
6815  // Actual offset of this suballocation doesn't match expected one.
6816  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6817 
6818  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6819  // Two adjacent free suballocations are invalid. They should be merged.
6820  VMA_VALIDATE(!prevFree || !currFree);
6821 
6822  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
6823 
6824  if(currFree)
6825  {
6826  calculatedSumFreeSize += subAlloc.size;
6827  ++calculatedFreeCount;
6828  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6829  {
6830  ++freeSuballocationsToRegister;
6831  }
6832 
6833  // Margin required between allocations - every free range must be at least that large.
6834  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
6835  }
6836  else
6837  {
6838  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
6839  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
6840 
6841  // Margin required between allocations - previous allocation must be free.
6842  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
6843  }
6844 
6845  calculatedOffset += subAlloc.size;
6846  prevFree = currFree;
6847  }
6848 
6849  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6850  // match the expected count.
6851  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6852 
6853  VkDeviceSize lastSize = 0;
6854  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6855  {
6856  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6857 
6858  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6859  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6860  // They must be sorted by size ascending.
6861  VMA_VALIDATE(suballocItem->size >= lastSize);
6862 
6863  lastSize = suballocItem->size;
6864  }
6865 
6866  // Check if totals match the calculated values.
6867  VMA_VALIDATE(ValidateFreeSuballocationList());
6868  VMA_VALIDATE(calculatedOffset == GetSize());
6869  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6870  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6871 
6872  return true;
6873 }
6874 
6875 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6876 {
6877  if(!m_FreeSuballocationsBySize.empty())
6878  {
6879  return m_FreeSuballocationsBySize.back()->size;
6880  }
6881  else
6882  {
6883  return 0;
6884  }
6885 }
6886 
6887 bool VmaBlockMetadata_Generic::IsEmpty() const
6888 {
6889  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6890 }
6891 
6892 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6893 {
6894  outInfo.blockCount = 1;
6895 
6896  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6897  outInfo.allocationCount = rangeCount - m_FreeCount;
6898  outInfo.unusedRangeCount = m_FreeCount;
6899 
6900  outInfo.unusedBytes = m_SumFreeSize;
6901  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6902 
6903  outInfo.allocationSizeMin = UINT64_MAX;
6904  outInfo.allocationSizeMax = 0;
6905  outInfo.unusedRangeSizeMin = UINT64_MAX;
6906  outInfo.unusedRangeSizeMax = 0;
6907 
6908  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6909  suballocItem != m_Suballocations.cend();
6910  ++suballocItem)
6911  {
6912  const VmaSuballocation& suballoc = *suballocItem;
6913  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6914  {
6915  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6916  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6917  }
6918  else
6919  {
6920  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6921  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6922  }
6923  }
6924 }
6925 
6926 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6927 {
6928  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6929 
6930  inoutStats.size += GetSize();
6931  inoutStats.unusedSize += m_SumFreeSize;
6932  inoutStats.allocationCount += rangeCount - m_FreeCount;
6933  inoutStats.unusedRangeCount += m_FreeCount;
6934  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6935 }
6936 
6937 #if VMA_STATS_STRING_ENABLED
6938 
6939 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6940 {
6941  PrintDetailedMap_Begin(json,
6942  m_SumFreeSize, // unusedBytes
6943  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6944  m_FreeCount); // unusedRangeCount
6945 
6946  size_t i = 0;
6947  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6948  suballocItem != m_Suballocations.cend();
6949  ++suballocItem, ++i)
6950  {
6951  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6952  {
6953  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6954  }
6955  else
6956  {
6957  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6958  }
6959  }
6960 
6961  PrintDetailedMap_End(json);
6962 }
6963 
6964 #endif // #if VMA_STATS_STRING_ENABLED
6965 
6966 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6967  uint32_t currentFrameIndex,
6968  uint32_t frameInUseCount,
6969  VkDeviceSize bufferImageGranularity,
6970  VkDeviceSize allocSize,
6971  VkDeviceSize allocAlignment,
6972  bool upperAddress,
6973  VmaSuballocationType allocType,
6974  bool canMakeOtherLost,
6975  uint32_t strategy,
6976  VmaAllocationRequest* pAllocationRequest)
6977 {
6978  VMA_ASSERT(allocSize > 0);
6979  VMA_ASSERT(!upperAddress);
6980  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6981  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6982  VMA_HEAVY_ASSERT(Validate());
6983 
6984  // There is not enough total free space in this block to fulfill the request: Early return.
6985  if(canMakeOtherLost == false &&
6986  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6987  {
6988  return false;
6989  }
6990 
6991  // New algorithm, efficiently searching freeSuballocationsBySize.
6992  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6993  if(freeSuballocCount > 0)
6994  {
6995  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
6996  {
6997  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
6998  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6999  m_FreeSuballocationsBySize.data(),
7000  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7001  allocSize + 2 * VMA_DEBUG_MARGIN,
7002  VmaSuballocationItemSizeLess());
7003  size_t index = it - m_FreeSuballocationsBySize.data();
7004  for(; index < freeSuballocCount; ++index)
7005  {
7006  if(CheckAllocation(
7007  currentFrameIndex,
7008  frameInUseCount,
7009  bufferImageGranularity,
7010  allocSize,
7011  allocAlignment,
7012  allocType,
7013  m_FreeSuballocationsBySize[index],
7014  false, // canMakeOtherLost
7015  &pAllocationRequest->offset,
7016  &pAllocationRequest->itemsToMakeLostCount,
7017  &pAllocationRequest->sumFreeSize,
7018  &pAllocationRequest->sumItemSize))
7019  {
7020  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7021  return true;
7022  }
7023  }
7024  }
7025  else // WORST_FIT, FIRST_FIT
7026  {
7027  // Search starting from the biggest suballocations.
7028  for(size_t index = freeSuballocCount; index--; )
7029  {
7030  if(CheckAllocation(
7031  currentFrameIndex,
7032  frameInUseCount,
7033  bufferImageGranularity,
7034  allocSize,
7035  allocAlignment,
7036  allocType,
7037  m_FreeSuballocationsBySize[index],
7038  false, // canMakeOtherLost
7039  &pAllocationRequest->offset,
7040  &pAllocationRequest->itemsToMakeLostCount,
7041  &pAllocationRequest->sumFreeSize,
7042  &pAllocationRequest->sumItemSize))
7043  {
7044  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7045  return true;
7046  }
7047  }
7048  }
7049  }
7050 
7051  if(canMakeOtherLost)
7052  {
7053  // Brute-force algorithm. TODO: Come up with something better.
7054 
7055  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7056  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7057 
7058  VmaAllocationRequest tmpAllocRequest = {};
7059  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7060  suballocIt != m_Suballocations.end();
7061  ++suballocIt)
7062  {
7063  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7064  suballocIt->hAllocation->CanBecomeLost())
7065  {
7066  if(CheckAllocation(
7067  currentFrameIndex,
7068  frameInUseCount,
7069  bufferImageGranularity,
7070  allocSize,
7071  allocAlignment,
7072  allocType,
7073  suballocIt,
7074  canMakeOtherLost,
7075  &tmpAllocRequest.offset,
7076  &tmpAllocRequest.itemsToMakeLostCount,
7077  &tmpAllocRequest.sumFreeSize,
7078  &tmpAllocRequest.sumItemSize))
7079  {
7080  tmpAllocRequest.item = suballocIt;
7081 
7082  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7083  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7084  {
7085  *pAllocationRequest = tmpAllocRequest;
7086  }
7087  }
7088  }
7089  }
7090 
7091  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7092  {
7093  return true;
7094  }
7095  }
7096 
7097  return false;
7098 }
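// In summary: BEST_FIT binary-searches the size-sorted vector and returns the
// smallest free range that passes CheckAllocation(); WORST_FIT and FIRST_FIT
// walk the same vector from the largest range downward; and the
// canMakeOtherLost path falls back to a linear scan over all suballocations,
// keeping the candidate with the lowest CalcCost().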
7099 
7100 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7101  uint32_t currentFrameIndex,
7102  uint32_t frameInUseCount,
7103  VmaAllocationRequest* pAllocationRequest)
7104 {
7105  while(pAllocationRequest->itemsToMakeLostCount > 0)
7106  {
7107  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7108  {
7109  ++pAllocationRequest->item;
7110  }
7111  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7112  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7113  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7114  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7115  {
7116  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7117  --pAllocationRequest->itemsToMakeLostCount;
7118  }
7119  else
7120  {
7121  return false;
7122  }
7123  }
7124 
7125  VMA_HEAVY_ASSERT(Validate());
7126  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7127  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7128 
7129  return true;
7130 }
7131 
7132 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7133 {
7134  uint32_t lostAllocationCount = 0;
7135  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7136  it != m_Suballocations.end();
7137  ++it)
7138  {
7139  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7140  it->hAllocation->CanBecomeLost() &&
7141  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7142  {
7143  it = FreeSuballocation(it);
7144  ++lostAllocationCount;
7145  }
7146  }
7147  return lostAllocationCount;
7148 }
7149 
7150 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7151 {
7152  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7153  it != m_Suballocations.end();
7154  ++it)
7155  {
7156  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7157  {
7158  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7159  {
7160  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7161  return VK_ERROR_VALIDATION_FAILED_EXT;
7162  }
7163  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7164  {
7165  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7166  return VK_ERROR_VALIDATION_FAILED_EXT;
7167  }
7168  }
7169  }
7170 
7171  return VK_SUCCESS;
7172 }
7173 
7174 void VmaBlockMetadata_Generic::Alloc(
7175  const VmaAllocationRequest& request,
7176  VmaSuballocationType type,
7177  VkDeviceSize allocSize,
7178  bool upperAddress,
7179  VmaAllocation hAllocation)
7180 {
7181  VMA_ASSERT(!upperAddress);
7182  VMA_ASSERT(request.item != m_Suballocations.end());
7183  VmaSuballocation& suballoc = *request.item;
7184  // Given suballocation is a free block.
7185  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7186  // Given offset is inside this suballocation.
7187  VMA_ASSERT(request.offset >= suballoc.offset);
7188  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7189  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7190  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7191 
7192  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7193  // it to become used.
7194  UnregisterFreeSuballocation(request.item);
7195 
7196  suballoc.offset = request.offset;
7197  suballoc.size = allocSize;
7198  suballoc.type = type;
7199  suballoc.hAllocation = hAllocation;
7200 
7201  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7202  if(paddingEnd)
7203  {
7204  VmaSuballocation paddingSuballoc = {};
7205  paddingSuballoc.offset = request.offset + allocSize;
7206  paddingSuballoc.size = paddingEnd;
7207  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7208  VmaSuballocationList::iterator next = request.item;
7209  ++next;
7210  const VmaSuballocationList::iterator paddingEndItem =
7211  m_Suballocations.insert(next, paddingSuballoc);
7212  RegisterFreeSuballocation(paddingEndItem);
7213  }
7214 
7215  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7216  if(paddingBegin)
7217  {
7218  VmaSuballocation paddingSuballoc = {};
7219  paddingSuballoc.offset = request.offset - paddingBegin;
7220  paddingSuballoc.size = paddingBegin;
7221  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7222  const VmaSuballocationList::iterator paddingBeginItem =
7223  m_Suballocations.insert(request.item, paddingSuballoc);
7224  RegisterFreeSuballocation(paddingBeginItem);
7225  }
7226 
7227  // Update totals.
7228  m_FreeCount = m_FreeCount - 1;
7229  if(paddingBegin > 0)
7230  {
7231  ++m_FreeCount;
7232  }
7233  if(paddingEnd > 0)
7234  {
7235  ++m_FreeCount;
7236  }
7237  m_SumFreeSize -= allocSize;
7238 }
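// A worked example of the split above: placing a 1000-byte allocation at
// request.offset 24 inside a free range { offset 0, size 4096 } produces
//
//   [ FREE 0..24 ) [ USED 24..1024 ) [ FREE 1024..4096 )
//
// paddingBegin = 24 and paddingEnd = 3072 each become a new free
// suballocation, so m_FreeCount goes 1 -> 2 (-1 for the consumed range, +1 for
// each padding), and m_SumFreeSize drops by exactly allocSize = 1000.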
7239 
7240 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7241 {
7242  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7243  suballocItem != m_Suballocations.end();
7244  ++suballocItem)
7245  {
7246  VmaSuballocation& suballoc = *suballocItem;
7247  if(suballoc.hAllocation == allocation)
7248  {
7249  FreeSuballocation(suballocItem);
7250  VMA_HEAVY_ASSERT(Validate());
7251  return;
7252  }
7253  }
7254  VMA_ASSERT(0 && "Not found!");
7255 }
7256 
7257 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7258 {
7259  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7260  suballocItem != m_Suballocations.end();
7261  ++suballocItem)
7262  {
7263  VmaSuballocation& suballoc = *suballocItem;
7264  if(suballoc.offset == offset)
7265  {
7266  FreeSuballocation(suballocItem);
7267  return;
7268  }
7269  }
7270  VMA_ASSERT(0 && "Not found!");
7271 }
7272 
7273 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
7274 {
7275  typedef VmaSuballocationList::iterator iter_type;
7276  for(iter_type suballocItem = m_Suballocations.begin();
7277  suballocItem != m_Suballocations.end();
7278  ++suballocItem)
7279  {
7280  VmaSuballocation& suballoc = *suballocItem;
7281  if(suballoc.hAllocation == alloc)
7282  {
7283  iter_type nextItem = suballocItem;
7284  ++nextItem;
7285 
7286  // Should have been ensured on higher level.
7287  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
7288 
7289  // Shrinking.
7290  if(newSize < alloc->GetSize())
7291  {
7292  const VkDeviceSize sizeDiff = suballoc.size - newSize;
7293 
7294  // There is next item.
7295  if(nextItem != m_Suballocations.end())
7296  {
7297  // Next item is free.
7298  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7299  {
7300  // Grow this next item backward.
7301  UnregisterFreeSuballocation(nextItem);
7302  nextItem->offset -= sizeDiff;
7303  nextItem->size += sizeDiff;
7304  RegisterFreeSuballocation(nextItem);
7305  }
7306  // Next item is not free.
7307  else
7308  {
7309  // Create free item after current one.
7310  VmaSuballocation newFreeSuballoc;
7311  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
7312  newFreeSuballoc.offset = suballoc.offset + newSize;
7313  newFreeSuballoc.size = sizeDiff;
7314  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7315  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
7316  RegisterFreeSuballocation(newFreeSuballocIt);
7317 
7318  ++m_FreeCount;
7319  }
7320  }
7321  // This is the last item.
7322  else
7323  {
7324  // Create free item at the end.
7325  VmaSuballocation newFreeSuballoc;
7326  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
7327  newFreeSuballoc.offset = suballoc.offset + newSize;
7328  newFreeSuballoc.size = sizeDiff;
7329  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7330  m_Suballocations.push_back(newFreeSuballoc);
7331 
7332  iter_type newFreeSuballocIt = m_Suballocations.end();
7333  RegisterFreeSuballocation(--newFreeSuballocIt);
7334 
7335  ++m_FreeCount;
7336  }
7337 
7338  suballoc.size = newSize;
7339  m_SumFreeSize += sizeDiff;
7340  }
7341  // Growing.
7342  else
7343  {
7344  const VkDeviceSize sizeDiff = newSize - suballoc.size;
7345 
7346  // There is next item.
7347  if(nextItem != m_Suballocations.end())
7348  {
7349  // Next item is free.
7350  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7351  {
7352  // There is not enough free space, including margin.
7353  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
7354  {
7355  return false;
7356  }
7357 
7358  // There is more free space than required.
7359  if(nextItem->size > sizeDiff)
7360  {
7361  // Move and shrink this next item.
7362  UnregisterFreeSuballocation(nextItem);
7363  nextItem->offset += sizeDiff;
7364  nextItem->size -= sizeDiff;
7365  RegisterFreeSuballocation(nextItem);
7366  }
7367  // There is exactly the amount of free space required.
7368  else
7369  {
7370  // Remove this next free item.
7371  UnregisterFreeSuballocation(nextItem);
7372  m_Suballocations.erase(nextItem);
7373  --m_FreeCount;
7374  }
7375  }
7376  // Next item is not free - there is no space to grow.
7377  else
7378  {
7379  return false;
7380  }
7381  }
7382  // This is the last item - there is no space to grow.
7383  else
7384  {
7385  return false;
7386  }
7387 
7388  suballoc.size = newSize;
7389  m_SumFreeSize -= sizeDiff;
7390  }
7391 
7392  // We cannot call Validate() here because the alloc object is updated to the new size outside of this call.
7393  return true;
7394  }
7395  }
7396  VMA_ASSERT(0 && "Not found!");
7397  return false;
7398 }
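// Net effect of the branches above: shrinking always succeeds (the freed tail
// becomes, or merges backward into, a free range), while growing succeeds only
// when the next suballocation is free and large enough including
// VMA_DEBUG_MARGIN; every other layout returns false, leaving any fallback
// (e.g. allocate-copy-free) to the caller.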
7399 
7400 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7401 {
7402  VkDeviceSize lastSize = 0;
7403  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7404  {
7405  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7406 
7407  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7408  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7409  VMA_VALIDATE(it->size >= lastSize);
7410  lastSize = it->size;
7411  }
7412  return true;
7413 }
7414 
7415 bool VmaBlockMetadata_Generic::CheckAllocation(
7416  uint32_t currentFrameIndex,
7417  uint32_t frameInUseCount,
7418  VkDeviceSize bufferImageGranularity,
7419  VkDeviceSize allocSize,
7420  VkDeviceSize allocAlignment,
7421  VmaSuballocationType allocType,
7422  VmaSuballocationList::const_iterator suballocItem,
7423  bool canMakeOtherLost,
7424  VkDeviceSize* pOffset,
7425  size_t* itemsToMakeLostCount,
7426  VkDeviceSize* pSumFreeSize,
7427  VkDeviceSize* pSumItemSize) const
7428 {
7429  VMA_ASSERT(allocSize > 0);
7430  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7431  VMA_ASSERT(suballocItem != m_Suballocations.cend());
7432  VMA_ASSERT(pOffset != VMA_NULL);
7433 
7434  *itemsToMakeLostCount = 0;
7435  *pSumFreeSize = 0;
7436  *pSumItemSize = 0;
7437 
7438  if(canMakeOtherLost)
7439  {
7440  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7441  {
7442  *pSumFreeSize = suballocItem->size;
7443  }
7444  else
7445  {
7446  if(suballocItem->hAllocation->CanBecomeLost() &&
7447  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7448  {
7449  ++*itemsToMakeLostCount;
7450  *pSumItemSize = suballocItem->size;
7451  }
7452  else
7453  {
7454  return false;
7455  }
7456  }
7457 
7458  // Remaining size is too small for this request: Early return.
7459  if(GetSize() - suballocItem->offset < allocSize)
7460  {
7461  return false;
7462  }
7463 
7464  // Start from offset equal to beginning of this suballocation.
7465  *pOffset = suballocItem->offset;
7466 
7467  // Apply VMA_DEBUG_MARGIN at the beginning.
7468  if(VMA_DEBUG_MARGIN > 0)
7469  {
7470  *pOffset += VMA_DEBUG_MARGIN;
7471  }
7472 
7473  // Apply alignment.
7474  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7475 
7476  // Check previous suballocations for BufferImageGranularity conflicts.
7477  // Make bigger alignment if necessary.
7478  if(bufferImageGranularity > 1)
7479  {
7480  bool bufferImageGranularityConflict = false;
7481  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7482  while(prevSuballocItem != m_Suballocations.cbegin())
7483  {
7484  --prevSuballocItem;
7485  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7486  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7487  {
7488  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7489  {
7490  bufferImageGranularityConflict = true;
7491  break;
7492  }
7493  }
7494  else
7495  // Already on previous page.
7496  break;
7497  }
7498  if(bufferImageGranularityConflict)
7499  {
7500  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7501  }
7502  }
7503 
7504  // Now that we have the final *pOffset, check if we are past suballocItem.
7505  // If yes, return false - this function should be called for another suballocItem as the starting point.
7506  if(*pOffset >= suballocItem->offset + suballocItem->size)
7507  {
7508  return false;
7509  }
7510 
7511  // Calculate padding at the beginning based on current offset.
7512  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
7513 
7514  // Calculate required margin at the end.
7515  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7516 
7517  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
7518  // Another early return check.
7519  if(suballocItem->offset + totalSize > GetSize())
7520  {
7521  return false;
7522  }
7523 
7524  // Advance lastSuballocItem until desired size is reached.
7525  // Update itemsToMakeLostCount.
7526  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
7527  if(totalSize > suballocItem->size)
7528  {
7529  VkDeviceSize remainingSize = totalSize - suballocItem->size;
7530  while(remainingSize > 0)
7531  {
7532  ++lastSuballocItem;
7533  if(lastSuballocItem == m_Suballocations.cend())
7534  {
7535  return false;
7536  }
7537  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7538  {
7539  *pSumFreeSize += lastSuballocItem->size;
7540  }
7541  else
7542  {
7543  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
7544  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
7545  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7546  {
7547  ++*itemsToMakeLostCount;
7548  *pSumItemSize += lastSuballocItem->size;
7549  }
7550  else
7551  {
7552  return false;
7553  }
7554  }
7555  remainingSize = (lastSuballocItem->size < remainingSize) ?
7556  remainingSize - lastSuballocItem->size : 0;
7557  }
7558  }
7559 
7560  // Check next suballocations for BufferImageGranularity conflicts.
7561  // If conflict exists, we must mark more allocations lost or fail.
7562  if(bufferImageGranularity > 1)
7563  {
7564  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
7565  ++nextSuballocItem;
7566  while(nextSuballocItem != m_Suballocations.cend())
7567  {
7568  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7569  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7570  {
7571  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7572  {
7573  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
7574  if(nextSuballoc.hAllocation->CanBecomeLost() &&
7575  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7576  {
7577  ++*itemsToMakeLostCount;
7578  }
7579  else
7580  {
7581  return false;
7582  }
7583  }
7584  }
7585  else
7586  {
7587  // Already on next page.
7588  break;
7589  }
7590  ++nextSuballocItem;
7591  }
7592  }
7593  }
7594  else
7595  {
7596  const VmaSuballocation& suballoc = *suballocItem;
7597  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7598 
7599  *pSumFreeSize = suballoc.size;
7600 
7601  // Size of this suballocation is too small for this request: Early return.
7602  if(suballoc.size < allocSize)
7603  {
7604  return false;
7605  }
7606 
7607  // Start from offset equal to beginning of this suballocation.
7608  *pOffset = suballoc.offset;
7609 
7610  // Apply VMA_DEBUG_MARGIN at the beginning.
7611  if(VMA_DEBUG_MARGIN > 0)
7612  {
7613  *pOffset += VMA_DEBUG_MARGIN;
7614  }
7615 
7616  // Apply alignment.
7617  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7618 
7619  // Check previous suballocations for BufferImageGranularity conflicts.
7620  // Make bigger alignment if necessary.
7621  if(bufferImageGranularity > 1)
7622  {
7623  bool bufferImageGranularityConflict = false;
7624  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7625  while(prevSuballocItem != m_Suballocations.cbegin())
7626  {
7627  --prevSuballocItem;
7628  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7629  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7630  {
7631  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7632  {
7633  bufferImageGranularityConflict = true;
7634  break;
7635  }
7636  }
7637  else
7638  // Already on previous page.
7639  break;
7640  }
7641  if(bufferImageGranularityConflict)
7642  {
7643  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7644  }
7645  }
7646 
7647  // Calculate padding at the beginning based on current offset.
7648  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
7649 
7650  // Calculate required margin at the end.
7651  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7652 
7653  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
7654  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
7655  {
7656  return false;
7657  }
7658 
7659  // Check next suballocations for BufferImageGranularity conflicts.
7660  // If conflict exists, allocation cannot be made here.
7661  if(bufferImageGranularity > 1)
7662  {
7663  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7664  ++nextSuballocItem;
7665  while(nextSuballocItem != m_Suballocations.cend())
7666  {
7667  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7668  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7669  {
7670  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7671  {
7672  return false;
7673  }
7674  }
7675  else
7676  {
7677  // Already on next page.
7678  break;
7679  }
7680  ++nextSuballocItem;
7681  }
7682  }
7683  }
7684 
7685  // All tests passed: Success. pOffset is already filled.
7686  return true;
7687 }
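// The offset computation above, condensed: start at the candidate range's
// offset, add VMA_DEBUG_MARGIN, align up to allocAlignment, then align up
// again to bufferImageGranularity if a preceding suballocation of a
// conflicting type (linear vs. optimal) shares the same granularity "page".
// Numeric example with offset 300, margin 0, alignment 256, and a conflict at
// granularity 1024:
//
//   *pOffset = VmaAlignUp(300, 256)  = 512
//   *pOffset = VmaAlignUp(512, 1024) = 1024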
7688 
7689 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7690 {
7691  VMA_ASSERT(item != m_Suballocations.end());
7692  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7693 
7694  VmaSuballocationList::iterator nextItem = item;
7695  ++nextItem;
7696  VMA_ASSERT(nextItem != m_Suballocations.end());
7697  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7698 
7699  item->size += nextItem->size;
7700  --m_FreeCount;
7701  m_Suballocations.erase(nextItem);
7702 }
7703 
7704 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7705 {
7706  // Change this suballocation to be marked as free.
7707  VmaSuballocation& suballoc = *suballocItem;
7708  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7709  suballoc.hAllocation = VK_NULL_HANDLE;
7710 
7711  // Update totals.
7712  ++m_FreeCount;
7713  m_SumFreeSize += suballoc.size;
7714 
7715  // Merge with previous and/or next suballocation if it's also free.
7716  bool mergeWithNext = false;
7717  bool mergeWithPrev = false;
7718 
7719  VmaSuballocationList::iterator nextItem = suballocItem;
7720  ++nextItem;
7721  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7722  {
7723  mergeWithNext = true;
7724  }
7725 
7726  VmaSuballocationList::iterator prevItem = suballocItem;
7727  if(suballocItem != m_Suballocations.begin())
7728  {
7729  --prevItem;
7730  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7731  {
7732  mergeWithPrev = true;
7733  }
7734  }
7735 
7736  if(mergeWithNext)
7737  {
7738  UnregisterFreeSuballocation(nextItem);
7739  MergeFreeWithNext(suballocItem);
7740  }
7741 
7742  if(mergeWithPrev)
7743  {
7744  UnregisterFreeSuballocation(prevItem);
7745  MergeFreeWithNext(prevItem);
7746  RegisterFreeSuballocation(prevItem);
7747  return prevItem;
7748  }
7749  else
7750  {
7751  RegisterFreeSuballocation(suballocItem);
7752  return suballocItem;
7753  }
7754 }
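// Merge behavior, by example: freeing B in [ FREE A ][ USED B ][ FREE C ]
// first absorbs C into B (mergeWithNext), then absorbs the combined range into
// A (mergeWithPrev), re-registers A under its new size, and returns the
// iterator to A - the caller always receives the surviving free range.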
7755 
7756 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7757 {
7758  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7759  VMA_ASSERT(item->size > 0);
7760 
7761  // You may want to enable this validation at the beginning or at the end of
7762  // this function, depending on what you want to check.
7763  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7764 
7765  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7766  {
7767  if(m_FreeSuballocationsBySize.empty())
7768  {
7769  m_FreeSuballocationsBySize.push_back(item);
7770  }
7771  else
7772  {
7773  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7774  }
7775  }
7776 
7777  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7778 }
7779 
7780 
7781 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7782 {
7783  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7784  VMA_ASSERT(item->size > 0);
7785 
7786  // You may want to enable this validation at the beginning or at the end of
7787  // this function, depending on what you want to check.
7788  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7789 
7790  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7791  {
7792  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7793  m_FreeSuballocationsBySize.data(),
7794  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7795  item,
7796  VmaSuballocationItemSizeLess());
7797  for(size_t index = it - m_FreeSuballocationsBySize.data();
7798  index < m_FreeSuballocationsBySize.size();
7799  ++index)
7800  {
7801  if(m_FreeSuballocationsBySize[index] == item)
7802  {
7803  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7804  return;
7805  }
7806  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7807  }
7808  VMA_ASSERT(0 && "Not found.");
7809  }
7810 
7811  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7812 }
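// Since m_FreeSuballocationsBySize may contain several ranges of equal size,
// the binary search above only locates the first entry with item->size; the
// loop then walks that run of equal-sized entries until it finds the exact
// iterator, and asserts if the run ends without a match.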
7813 
7814 ////////////////////////////////////////////////////////////////////////////////
7815 // class VmaBlockMetadata_Linear
7816 
7817 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
7818  VmaBlockMetadata(hAllocator),
7819  m_SumFreeSize(0),
7820  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7821  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7822  m_1stVectorIndex(0),
7823  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7824  m_1stNullItemsBeginCount(0),
7825  m_1stNullItemsMiddleCount(0),
7826  m_2ndNullItemsCount(0)
7827 {
7828 }
7829 
7830 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
7831 {
7832 }
7833 
7834 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7835 {
7836  VmaBlockMetadata::Init(size);
7837  m_SumFreeSize = size;
7838 }
7839 
7840 bool VmaBlockMetadata_Linear::Validate() const
7841 {
7842  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7843  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7844 
7845  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7846  VMA_VALIDATE(!suballocations1st.empty() ||
7847  suballocations2nd.empty() ||
7848  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7849 
7850  if(!suballocations1st.empty())
7851  {
7852  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
7853  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
7854  // A null item at the end should have been removed with pop_back().
7855  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
7856  }
7857  if(!suballocations2nd.empty())
7858  {
7859  // A null item at the end should have been removed with pop_back().
7860  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
7861  }
7862 
7863  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7864  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7865 
7866  VkDeviceSize sumUsedSize = 0;
7867  const size_t suballoc1stCount = suballocations1st.size();
7868  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7869 
7870  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7871  {
7872  const size_t suballoc2ndCount = suballocations2nd.size();
7873  size_t nullItem2ndCount = 0;
7874  for(size_t i = 0; i < suballoc2ndCount; ++i)
7875  {
7876  const VmaSuballocation& suballoc = suballocations2nd[i];
7877  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7878 
7879  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7880  VMA_VALIDATE(suballoc.offset >= offset);
7881 
7882  if(!currFree)
7883  {
7884  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7885  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7886  sumUsedSize += suballoc.size;
7887  }
7888  else
7889  {
7890  ++nullItem2ndCount;
7891  }
7892 
7893  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7894  }
7895 
7896  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7897  }
7898 
7899  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7900  {
7901  const VmaSuballocation& suballoc = suballocations1st[i];
7902  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7903  suballoc.hAllocation == VK_NULL_HANDLE);
7904  }
7905 
7906  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7907 
7908  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7909  {
7910  const VmaSuballocation& suballoc = suballocations1st[i];
7911  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7912 
7913  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7914  VMA_VALIDATE(suballoc.offset >= offset);
7915  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7916 
7917  if(!currFree)
7918  {
7919  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7920  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7921  sumUsedSize += suballoc.size;
7922  }
7923  else
7924  {
7925  ++nullItem1stCount;
7926  }
7927 
7928  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7929  }
7930  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7931 
7932  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7933  {
7934  const size_t suballoc2ndCount = suballocations2nd.size();
7935  size_t nullItem2ndCount = 0;
7936  for(size_t i = suballoc2ndCount; i--; )
7937  {
7938  const VmaSuballocation& suballoc = suballocations2nd[i];
7939  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7940 
7941  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7942  VMA_VALIDATE(suballoc.offset >= offset);
7943 
7944  if(!currFree)
7945  {
7946  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7947  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7948  sumUsedSize += suballoc.size;
7949  }
7950  else
7951  {
7952  ++nullItem2ndCount;
7953  }
7954 
7955  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7956  }
7957 
7958  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7959  }
7960 
7961  VMA_VALIDATE(offset <= GetSize());
7962  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7963 
7964  return true;
7965 }
7966 
7967 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7968 {
7969  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7970  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7971 }
7972 
7973 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7974 {
7975  const VkDeviceSize size = GetSize();
7976 
7977  /*
7978  We don't consider gaps inside allocation vectors with freed allocations because
7979  they are not suitable for reuse in a linear allocator. We consider only space that
7980  is available for new allocations.
7981  */
7982  if(IsEmpty())
7983  {
7984  return size;
7985  }
7986 
7987  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7988 
7989  switch(m_2ndVectorMode)
7990  {
7991  case SECOND_VECTOR_EMPTY:
7992  /*
7993  Available space is after the end of 1st, as well as before the beginning of 1st
7994  (which would make it a ring buffer).
7995  */
7996  {
7997  const size_t suballocations1stCount = suballocations1st.size();
7998  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
7999  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8000  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8001  return VMA_MAX(
8002  firstSuballoc.offset,
8003  size - (lastSuballoc.offset + lastSuballoc.size));
8004  }
8005  break;
8006 
8007  case SECOND_VECTOR_RING_BUFFER:
8008  /*
8009  Available space is only between end of 2nd and beginning of 1st.
8010  */
8011  {
8012  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8013  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8014  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8015  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8016  }
8017  break;
8018 
8019  case SECOND_VECTOR_DOUBLE_STACK:
8020  /*
8021  Available space is only between end of 1st and top of 2nd.
8022  */
8023  {
8024  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8025  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8026  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8027  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8028  }
8029  break;
8030 
8031  default:
8032  VMA_ASSERT(0);
8033  return 0;
8034  }
8035 }
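// Illustrative sketch, not part of the library: the free-range arithmetic
// above with hypothetical offsets, one case per m_2ndVectorMode.
#if 0
static void ExampleUnusedRangeSizeMax()
{
    const VkDeviceSize blockSize = 100;
    // SECOND_VECTOR_EMPTY: free space before and after 1st.
    // 1st occupies [20, 70) -> max free range = max(20, 100 - 70) = 30.
    const VkDeviceSize r1 = VMA_MAX((VkDeviceSize)20, blockSize - 70);
    // SECOND_VECTOR_RING_BUFFER: only between end of 2nd and start of 1st.
    // 2nd ends at 15, 1st starts at 40 -> 40 - 15 = 25.
    const VkDeviceSize r2 = 40 - 15;
    // SECOND_VECTOR_DOUBLE_STACK: only between end of 1st and top of 2nd.
    // 1st ends at 30, top of 2nd at 70 -> 70 - 30 = 40.
    const VkDeviceSize r3 = 70 - 30;
    (void)r1; (void)r2; (void)r3;
}
#endif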
8036 
8037 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8038 {
8039  const VkDeviceSize size = GetSize();
8040  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8041  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8042  const size_t suballoc1stCount = suballocations1st.size();
8043  const size_t suballoc2ndCount = suballocations2nd.size();
8044 
8045  outInfo.blockCount = 1;
8046  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8047  outInfo.unusedRangeCount = 0;
8048  outInfo.usedBytes = outInfo.unusedBytes = 0;
8049  outInfo.allocationSizeMin = UINT64_MAX;
8050  outInfo.allocationSizeMax = 0;
8051  outInfo.unusedRangeSizeMin = UINT64_MAX;
8052  outInfo.unusedRangeSizeMax = 0;
8053 
8054  VkDeviceSize lastOffset = 0;
8055 
8056  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8057  {
8058  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8059  size_t nextAlloc2ndIndex = 0;
8060  while(lastOffset < freeSpace2ndTo1stEnd)
8061  {
8062  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8063  while(nextAlloc2ndIndex < suballoc2ndCount &&
8064  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8065  {
8066  ++nextAlloc2ndIndex;
8067  }
8068 
8069  // Found non-null allocation.
8070  if(nextAlloc2ndIndex < suballoc2ndCount)
8071  {
8072  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8073 
8074  // 1. Process free space before this allocation.
8075  if(lastOffset < suballoc.offset)
8076  {
8077  // There is free space from lastOffset to suballoc.offset.
8078  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8079  ++outInfo.unusedRangeCount;
8080  outInfo.unusedBytes += unusedRangeSize;
8081  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8082  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8083  }
8084 
8085  // 2. Process this allocation.
8086  // There is allocation with suballoc.offset, suballoc.size.
8087  outInfo.usedBytes += suballoc.size;
8088  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8089  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8090 
8091  // 3. Prepare for next iteration.
8092  lastOffset = suballoc.offset + suballoc.size;
8093  ++nextAlloc2ndIndex;
8094  }
8095  // We are at the end.
8096  else
8097  {
8098  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8099  if(lastOffset < freeSpace2ndTo1stEnd)
8100  {
8101  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8102  ++outInfo.unusedRangeCount;
8103  outInfo.unusedBytes += unusedRangeSize;
8104  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8105  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8106  }
8107 
8108  // End of loop.
8109  lastOffset = freeSpace2ndTo1stEnd;
8110  }
8111  }
8112  }
8113 
8114  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8115  const VkDeviceSize freeSpace1stTo2ndEnd =
8116  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8117  while(lastOffset < freeSpace1stTo2ndEnd)
8118  {
8119  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8120  while(nextAlloc1stIndex < suballoc1stCount &&
8121  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8122  {
8123  ++nextAlloc1stIndex;
8124  }
8125 
8126  // Found non-null allocation.
8127  if(nextAlloc1stIndex < suballoc1stCount)
8128  {
8129  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8130 
8131  // 1. Process free space before this allocation.
8132  if(lastOffset < suballoc.offset)
8133  {
8134  // There is free space from lastOffset to suballoc.offset.
8135  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8136  ++outInfo.unusedRangeCount;
8137  outInfo.unusedBytes += unusedRangeSize;
8138  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8139  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8140  }
8141 
8142  // 2. Process this allocation.
8143  // There is allocation with suballoc.offset, suballoc.size.
8144  outInfo.usedBytes += suballoc.size;
8145  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8146  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8147 
8148  // 3. Prepare for next iteration.
8149  lastOffset = suballoc.offset + suballoc.size;
8150  ++nextAlloc1stIndex;
8151  }
8152  // We are at the end.
8153  else
8154  {
8155  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8156  if(lastOffset < freeSpace1stTo2ndEnd)
8157  {
8158  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8159  ++outInfo.unusedRangeCount;
8160  outInfo.unusedBytes += unusedRangeSize;
8161  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8162  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8163  }
8164 
8165  // End of loop.
8166  lastOffset = freeSpace1stTo2ndEnd;
8167  }
8168  }
8169 
8170  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8171  {
8172  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8173  while(lastOffset < size)
8174  {
8175  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8176  while(nextAlloc2ndIndex != SIZE_MAX &&
8177  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8178  {
8179  --nextAlloc2ndIndex;
8180  }
8181 
8182  // Found non-null allocation.
8183  if(nextAlloc2ndIndex != SIZE_MAX)
8184  {
8185  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8186 
8187  // 1. Process free space before this allocation.
8188  if(lastOffset < suballoc.offset)
8189  {
8190  // There is free space from lastOffset to suballoc.offset.
8191  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8192  ++outInfo.unusedRangeCount;
8193  outInfo.unusedBytes += unusedRangeSize;
8194  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8195  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8196  }
8197 
8198  // 2. Process this allocation.
8199  // There is allocation with suballoc.offset, suballoc.size.
8200  outInfo.usedBytes += suballoc.size;
8201  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8202  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8203 
8204  // 3. Prepare for next iteration.
8205  lastOffset = suballoc.offset + suballoc.size;
8206  --nextAlloc2ndIndex;
8207  }
8208  // We are at the end.
8209  else
8210  {
8211  // There is free space from lastOffset to size.
8212  if(lastOffset < size)
8213  {
8214  const VkDeviceSize unusedRangeSize = size - lastOffset;
8215  ++outInfo.unusedRangeCount;
8216  outInfo.unusedBytes += unusedRangeSize;
8217  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8218  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8219  }
8220 
8221  // End of loop.
8222  lastOffset = size;
8223  }
8224  }
8225  }
8226 
8227  outInfo.unusedBytes = size - outInfo.usedBytes;
8228 }
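// Illustrative sketch, not part of the library: the min/max folding pattern
// used above. A running minimum starts at UINT64_MAX and folds with VMA_MIN;
// a running maximum starts at 0 and must fold with VMA_MAX.
#if 0
static void ExampleMinMaxFold()
{
    VkDeviceSize sizeMin = UINT64_MAX, sizeMax = 0;
    const VkDeviceSize sizes[] = { 64, 256, 128 };
    for(size_t i = 0; i < 3; ++i)
    {
        sizeMin = VMA_MIN(sizeMin, sizes[i]); // ends as 64
        sizeMax = VMA_MAX(sizeMax, sizes[i]); // ends as 256
    }
    (void)sizeMin; (void)sizeMax;
}
#endif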
8229 
8230 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8231 {
8232  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8233  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8234  const VkDeviceSize size = GetSize();
8235  const size_t suballoc1stCount = suballocations1st.size();
8236  const size_t suballoc2ndCount = suballocations2nd.size();
8237 
8238  inoutStats.size += size;
8239 
8240  VkDeviceSize lastOffset = 0;
8241 
8242  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8243  {
8244  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8245  size_t nextAlloc2ndIndex = 0;
8246  while(lastOffset < freeSpace2ndTo1stEnd)
8247  {
8248  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8249  while(nextAlloc2ndIndex < suballoc2ndCount &&
8250  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8251  {
8252  ++nextAlloc2ndIndex;
8253  }
8254 
8255  // Found non-null allocation.
8256  if(nextAlloc2ndIndex < suballoc2ndCount)
8257  {
8258  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8259 
8260  // 1. Process free space before this allocation.
8261  if(lastOffset < suballoc.offset)
8262  {
8263  // There is free space from lastOffset to suballoc.offset.
8264  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8265  inoutStats.unusedSize += unusedRangeSize;
8266  ++inoutStats.unusedRangeCount;
8267  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8268  }
8269 
8270  // 2. Process this allocation.
8271  // There is allocation with suballoc.offset, suballoc.size.
8272  ++inoutStats.allocationCount;
8273 
8274  // 3. Prepare for next iteration.
8275  lastOffset = suballoc.offset + suballoc.size;
8276  ++nextAlloc2ndIndex;
8277  }
8278  // We are at the end.
8279  else
8280  {
8281  if(lastOffset < freeSpace2ndTo1stEnd)
8282  {
8283  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8284  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8285  inoutStats.unusedSize += unusedRangeSize;
8286  ++inoutStats.unusedRangeCount;
8287  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8288  }
8289 
8290  // End of loop.
8291  lastOffset = freeSpace2ndTo1stEnd;
8292  }
8293  }
8294  }
8295 
8296  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8297  const VkDeviceSize freeSpace1stTo2ndEnd =
8298  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8299  while(lastOffset < freeSpace1stTo2ndEnd)
8300  {
8301  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8302  while(nextAlloc1stIndex < suballoc1stCount &&
8303  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8304  {
8305  ++nextAlloc1stIndex;
8306  }
8307 
8308  // Found non-null allocation.
8309  if(nextAlloc1stIndex < suballoc1stCount)
8310  {
8311  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8312 
8313  // 1. Process free space before this allocation.
8314  if(lastOffset < suballoc.offset)
8315  {
8316  // There is free space from lastOffset to suballoc.offset.
8317  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8318  inoutStats.unusedSize += unusedRangeSize;
8319  ++inoutStats.unusedRangeCount;
8320  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8321  }
8322 
8323  // 2. Process this allocation.
8324  // There is allocation with suballoc.offset, suballoc.size.
8325  ++inoutStats.allocationCount;
8326 
8327  // 3. Prepare for next iteration.
8328  lastOffset = suballoc.offset + suballoc.size;
8329  ++nextAlloc1stIndex;
8330  }
8331  // We are at the end.
8332  else
8333  {
8334  if(lastOffset < freeSpace1stTo2ndEnd)
8335  {
8336  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8337  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8338  inoutStats.unusedSize += unusedRangeSize;
8339  ++inoutStats.unusedRangeCount;
8340  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8341  }
8342 
8343  // End of loop.
8344  lastOffset = freeSpace1stTo2ndEnd;
8345  }
8346  }
8347 
8348  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8349  {
8350  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8351  while(lastOffset < size)
8352  {
8353  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8354  while(nextAlloc2ndIndex != SIZE_MAX &&
8355  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8356  {
8357  --nextAlloc2ndIndex;
8358  }
8359 
8360  // Found non-null allocation.
8361  if(nextAlloc2ndIndex != SIZE_MAX)
8362  {
8363  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8364 
8365  // 1. Process free space before this allocation.
8366  if(lastOffset < suballoc.offset)
8367  {
8368  // There is free space from lastOffset to suballoc.offset.
8369  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8370  inoutStats.unusedSize += unusedRangeSize;
8371  ++inoutStats.unusedRangeCount;
8372  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8373  }
8374 
8375  // 2. Process this allocation.
8376  // There is allocation with suballoc.offset, suballoc.size.
8377  ++inoutStats.allocationCount;
8378 
8379  // 3. Prepare for next iteration.
8380  lastOffset = suballoc.offset + suballoc.size;
8381  --nextAlloc2ndIndex;
8382  }
8383  // We are at the end.
8384  else
8385  {
8386  if(lastOffset < size)
8387  {
8388  // There is free space from lastOffset to size.
8389  const VkDeviceSize unusedRangeSize = size - lastOffset;
8390  inoutStats.unusedSize += unusedRangeSize;
8391  ++inoutStats.unusedRangeCount;
8392  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8393  }
8394 
8395  // End of loop.
8396  lastOffset = size;
8397  }
8398  }
8399  }
8400 }
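// Illustrative sketch, not part of the library: AddPoolStats accumulates into
// inoutStats, so a caller can fold statistics over several blocks. The
// function and variable names below are hypothetical.
#if 0
static void ExampleAccumulatePoolStats(const VmaBlockMetadata_Linear* const* blocks, size_t blockCount)
{
    VmaPoolStats stats = {}; // zero-initialize all counters
    for(size_t i = 0; i < blockCount; ++i)
    {
        blocks[i]->AddPoolStats(stats); // size, unusedSize, counts all grow
    }
}
#endif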
8401 
8402 #if VMA_STATS_STRING_ENABLED
8403 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8404 {
8405  const VkDeviceSize size = GetSize();
8406  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8407  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8408  const size_t suballoc1stCount = suballocations1st.size();
8409  const size_t suballoc2ndCount = suballocations2nd.size();
8410 
8411  // FIRST PASS
8412 
8413  size_t unusedRangeCount = 0;
8414  VkDeviceSize usedBytes = 0;
8415 
8416  VkDeviceSize lastOffset = 0;
8417 
8418  size_t alloc2ndCount = 0;
8419  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8420  {
8421  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8422  size_t nextAlloc2ndIndex = 0;
8423  while(lastOffset < freeSpace2ndTo1stEnd)
8424  {
8425  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8426  while(nextAlloc2ndIndex < suballoc2ndCount &&
8427  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8428  {
8429  ++nextAlloc2ndIndex;
8430  }
8431 
8432  // Found non-null allocation.
8433  if(nextAlloc2ndIndex < suballoc2ndCount)
8434  {
8435  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8436 
8437  // 1. Process free space before this allocation.
8438  if(lastOffset < suballoc.offset)
8439  {
8440  // There is free space from lastOffset to suballoc.offset.
8441  ++unusedRangeCount;
8442  }
8443 
8444  // 2. Process this allocation.
8445  // There is allocation with suballoc.offset, suballoc.size.
8446  ++alloc2ndCount;
8447  usedBytes += suballoc.size;
8448 
8449  // 3. Prepare for next iteration.
8450  lastOffset = suballoc.offset + suballoc.size;
8451  ++nextAlloc2ndIndex;
8452  }
8453  // We are at the end.
8454  else
8455  {
8456  if(lastOffset < freeSpace2ndTo1stEnd)
8457  {
8458  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8459  ++unusedRangeCount;
8460  }
8461 
8462  // End of loop.
8463  lastOffset = freeSpace2ndTo1stEnd;
8464  }
8465  }
8466  }
8467 
8468  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8469  size_t alloc1stCount = 0;
8470  const VkDeviceSize freeSpace1stTo2ndEnd =
8471  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8472  while(lastOffset < freeSpace1stTo2ndEnd)
8473  {
8474  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8475  while(nextAlloc1stIndex < suballoc1stCount &&
8476  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8477  {
8478  ++nextAlloc1stIndex;
8479  }
8480 
8481  // Found non-null allocation.
8482  if(nextAlloc1stIndex < suballoc1stCount)
8483  {
8484  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8485 
8486  // 1. Process free space before this allocation.
8487  if(lastOffset < suballoc.offset)
8488  {
8489  // There is free space from lastOffset to suballoc.offset.
8490  ++unusedRangeCount;
8491  }
8492 
8493  // 2. Process this allocation.
8494  // There is allocation with suballoc.offset, suballoc.size.
8495  ++alloc1stCount;
8496  usedBytes += suballoc.size;
8497 
8498  // 3. Prepare for next iteration.
8499  lastOffset = suballoc.offset + suballoc.size;
8500  ++nextAlloc1stIndex;
8501  }
8502  // We are at the end.
8503  else
8504  {
8505  if(lastOffset < freeSpace1stTo2ndEnd)
8506  {
8507  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8508  ++unusedRangeCount;
8509  }
8510 
8511  // End of loop.
8512  lastOffset = freeSpace1stTo2ndEnd;
8513  }
8514  }
8515 
8516  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8517  {
8518  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8519  while(lastOffset < size)
8520  {
8521  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8522  while(nextAlloc2ndIndex != SIZE_MAX &&
8523  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8524  {
8525  --nextAlloc2ndIndex;
8526  }
8527 
8528  // Found non-null allocation.
8529  if(nextAlloc2ndIndex != SIZE_MAX)
8530  {
8531  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8532 
8533  // 1. Process free space before this allocation.
8534  if(lastOffset < suballoc.offset)
8535  {
8536  // There is free space from lastOffset to suballoc.offset.
8537  ++unusedRangeCount;
8538  }
8539 
8540  // 2. Process this allocation.
8541  // There is allocation with suballoc.offset, suballoc.size.
8542  ++alloc2ndCount;
8543  usedBytes += suballoc.size;
8544 
8545  // 3. Prepare for next iteration.
8546  lastOffset = suballoc.offset + suballoc.size;
8547  --nextAlloc2ndIndex;
8548  }
8549  // We are at the end.
8550  else
8551  {
8552  if(lastOffset < size)
8553  {
8554  // There is free space from lastOffset to size.
8555  ++unusedRangeCount;
8556  }
8557 
8558  // End of loop.
8559  lastOffset = size;
8560  }
8561  }
8562  }
8563 
8564  const VkDeviceSize unusedBytes = size - usedBytes;
8565  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8566 
8567  // SECOND PASS
8568  lastOffset = 0;
8569 
8570  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8571  {
8572  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8573  size_t nextAlloc2ndIndex = 0;
8574  while(lastOffset < freeSpace2ndTo1stEnd)
8575  {
8576  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8577  while(nextAlloc2ndIndex < suballoc2ndCount &&
8578  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8579  {
8580  ++nextAlloc2ndIndex;
8581  }
8582 
8583  // Found non-null allocation.
8584  if(nextAlloc2ndIndex < suballoc2ndCount)
8585  {
8586  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8587 
8588  // 1. Process free space before this allocation.
8589  if(lastOffset < suballoc.offset)
8590  {
8591  // There is free space from lastOffset to suballoc.offset.
8592  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8593  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8594  }
8595 
8596  // 2. Process this allocation.
8597  // There is allocation with suballoc.offset, suballoc.size.
8598  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8599 
8600  // 3. Prepare for next iteration.
8601  lastOffset = suballoc.offset + suballoc.size;
8602  ++nextAlloc2ndIndex;
8603  }
8604  // We are at the end.
8605  else
8606  {
8607  if(lastOffset < freeSpace2ndTo1stEnd)
8608  {
8609  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8610  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8611  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8612  }
8613 
8614  // End of loop.
8615  lastOffset = freeSpace2ndTo1stEnd;
8616  }
8617  }
8618  }
8619 
8620  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8621  while(lastOffset < freeSpace1stTo2ndEnd)
8622  {
8623  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8624  while(nextAlloc1stIndex < suballoc1stCount &&
8625  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8626  {
8627  ++nextAlloc1stIndex;
8628  }
8629 
8630  // Found non-null allocation.
8631  if(nextAlloc1stIndex < suballoc1stCount)
8632  {
8633  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8634 
8635  // 1. Process free space before this allocation.
8636  if(lastOffset < suballoc.offset)
8637  {
8638  // There is free space from lastOffset to suballoc.offset.
8639  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8640  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8641  }
8642 
8643  // 2. Process this allocation.
8644  // There is allocation with suballoc.offset, suballoc.size.
8645  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8646 
8647  // 3. Prepare for next iteration.
8648  lastOffset = suballoc.offset + suballoc.size;
8649  ++nextAlloc1stIndex;
8650  }
8651  // We are at the end.
8652  else
8653  {
8654  if(lastOffset < freeSpace1stTo2ndEnd)
8655  {
8656  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8657  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8658  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8659  }
8660 
8661  // End of loop.
8662  lastOffset = freeSpace1stTo2ndEnd;
8663  }
8664  }
8665 
8666  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8667  {
8668  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8669  while(lastOffset < size)
8670  {
8671  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8672  while(nextAlloc2ndIndex != SIZE_MAX &&
8673  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8674  {
8675  --nextAlloc2ndIndex;
8676  }
8677 
8678  // Found non-null allocation.
8679  if(nextAlloc2ndIndex != SIZE_MAX)
8680  {
8681  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8682 
8683  // 1. Process free space before this allocation.
8684  if(lastOffset < suballoc.offset)
8685  {
8686  // There is free space from lastOffset to suballoc.offset.
8687  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8688  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8689  }
8690 
8691  // 2. Process this allocation.
8692  // There is allocation with suballoc.offset, suballoc.size.
8693  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8694 
8695  // 3. Prepare for next iteration.
8696  lastOffset = suballoc.offset + suballoc.size;
8697  --nextAlloc2ndIndex;
8698  }
8699  // We are at the end.
8700  else
8701  {
8702  if(lastOffset < size)
8703  {
8704  // There is free space from lastOffset to size.
8705  const VkDeviceSize unusedRangeSize = size - lastOffset;
8706  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8707  }
8708 
8709  // End of loop.
8710  lastOffset = size;
8711  }
8712  }
8713  }
8714 
8715  PrintDetailedMap_End(json);
8716 }
8717 #endif // #if VMA_STATS_STRING_ENABLED
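// Note, as a miniature sketch (not part of the library): PrintDetailedMap
// deliberately walks the block twice, because PrintDetailedMap_Begin needs
// the totals up front. The first pass only counts; the second emits entries.
#if 0
static size_t ExampleTwoPass(const VkDeviceSize* items, size_t count)
{
    size_t n = 0;
    for(size_t i = 0; i < count; ++i) { if(items[i] != 0) ++n; } // pass 1: count
    // A hypothetical writer would emit a header using n here, then run the
    // same loop again (pass 2) emitting each non-zero item.
    return n;
}
#endif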
8718 
8719 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8720  uint32_t currentFrameIndex,
8721  uint32_t frameInUseCount,
8722  VkDeviceSize bufferImageGranularity,
8723  VkDeviceSize allocSize,
8724  VkDeviceSize allocAlignment,
8725  bool upperAddress,
8726  VmaSuballocationType allocType,
8727  bool canMakeOtherLost,
8728  uint32_t strategy,
8729  VmaAllocationRequest* pAllocationRequest)
8730 {
8731  VMA_ASSERT(allocSize > 0);
8732  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8733  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8734  VMA_HEAVY_ASSERT(Validate());
8735 
8736  const VkDeviceSize size = GetSize();
8737  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8738  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8739 
8740  if(upperAddress)
8741  {
8742  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8743  {
8744  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8745  return false;
8746  }
8747 
8748  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8749  if(allocSize > size)
8750  {
8751  return false;
8752  }
8753  VkDeviceSize resultBaseOffset = size - allocSize;
8754  if(!suballocations2nd.empty())
8755  {
8756  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8757  resultBaseOffset = lastSuballoc.offset - allocSize;
8758  if(allocSize > lastSuballoc.offset)
8759  {
8760  return false;
8761  }
8762  }
8763 
8764  // Start from offset equal to end of free space.
8765  VkDeviceSize resultOffset = resultBaseOffset;
8766 
8767  // Apply VMA_DEBUG_MARGIN at the end.
8768  if(VMA_DEBUG_MARGIN > 0)
8769  {
8770  if(resultOffset < VMA_DEBUG_MARGIN)
8771  {
8772  return false;
8773  }
8774  resultOffset -= VMA_DEBUG_MARGIN;
8775  }
8776 
8777  // Apply alignment.
8778  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8779 
8780  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8781  // Make bigger alignment if necessary.
8782  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8783  {
8784  bool bufferImageGranularityConflict = false;
8785  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8786  {
8787  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8788  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8789  {
8790  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8791  {
8792  bufferImageGranularityConflict = true;
8793  break;
8794  }
8795  }
8796  else
8797  // Already on previous page.
8798  break;
8799  }
8800  if(bufferImageGranularityConflict)
8801  {
8802  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8803  }
8804  }
8805 
8806  // There is enough free space.
8807  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8808  suballocations1st.back().offset + suballocations1st.back().size :
8809  0;
8810  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8811  {
8812  // Check previous suballocations for BufferImageGranularity conflicts.
8813  // If conflict exists, allocation cannot be made here.
8814  if(bufferImageGranularity > 1)
8815  {
8816  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8817  {
8818  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8819  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8820  {
8821  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8822  {
8823  return false;
8824  }
8825  }
8826  else
8827  {
8828  // Already on next page.
8829  break;
8830  }
8831  }
8832  }
8833 
8834  // All tests passed: Success.
8835  pAllocationRequest->offset = resultOffset;
8836  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8837  pAllocationRequest->sumItemSize = 0;
8838  // pAllocationRequest->item unused.
8839  pAllocationRequest->itemsToMakeLostCount = 0;
8840  return true;
8841  }
8842  }
8843  else // !upperAddress
8844  {
8845  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8846  {
8847  // Try to allocate at the end of 1st vector.
8848 
8849  VkDeviceSize resultBaseOffset = 0;
8850  if(!suballocations1st.empty())
8851  {
8852  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8853  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8854  }
8855 
8856  // Start from offset equal to beginning of free space.
8857  VkDeviceSize resultOffset = resultBaseOffset;
8858 
8859  // Apply VMA_DEBUG_MARGIN at the beginning.
8860  if(VMA_DEBUG_MARGIN > 0)
8861  {
8862  resultOffset += VMA_DEBUG_MARGIN;
8863  }
8864 
8865  // Apply alignment.
8866  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8867 
8868  // Check previous suballocations for BufferImageGranularity conflicts.
8869  // Make bigger alignment if necessary.
8870  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8871  {
8872  bool bufferImageGranularityConflict = false;
8873  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8874  {
8875  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8876  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8877  {
8878  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8879  {
8880  bufferImageGranularityConflict = true;
8881  break;
8882  }
8883  }
8884  else
8885  // Already on previous page.
8886  break;
8887  }
8888  if(bufferImageGranularityConflict)
8889  {
8890  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8891  }
8892  }
8893 
8894  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8895  suballocations2nd.back().offset : size;
8896 
8897  // There is enough free space at the end after alignment.
8898  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8899  {
8900  // Check next suballocations for BufferImageGranularity conflicts.
8901  // If conflict exists, allocation cannot be made here.
8902  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8903  {
8904  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8905  {
8906  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8907  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8908  {
8909  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8910  {
8911  return false;
8912  }
8913  }
8914  else
8915  {
8916  // Already on previous page.
8917  break;
8918  }
8919  }
8920  }
8921 
8922  // All tests passed: Success.
8923  pAllocationRequest->offset = resultOffset;
8924  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8925  pAllocationRequest->sumItemSize = 0;
8926  // pAllocationRequest->item unused.
8927  pAllocationRequest->itemsToMakeLostCount = 0;
8928  return true;
8929  }
8930  }
8931 
8932  // Wrap around to the beginning of the block: try to allocate at the end of the
8933  // 2nd vector, treating the beginning of the 1st vector as the end of free space.
8934  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8935  {
8936  VMA_ASSERT(!suballocations1st.empty());
8937 
8938  VkDeviceSize resultBaseOffset = 0;
8939  if(!suballocations2nd.empty())
8940  {
8941  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8942  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8943  }
8944 
8945  // Start from offset equal to beginning of free space.
8946  VkDeviceSize resultOffset = resultBaseOffset;
8947 
8948  // Apply VMA_DEBUG_MARGIN at the beginning.
8949  if(VMA_DEBUG_MARGIN > 0)
8950  {
8951  resultOffset += VMA_DEBUG_MARGIN;
8952  }
8953 
8954  // Apply alignment.
8955  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8956 
8957  // Check previous suballocations for BufferImageGranularity conflicts.
8958  // Make bigger alignment if necessary.
8959  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8960  {
8961  bool bufferImageGranularityConflict = false;
8962  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8963  {
8964  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8965  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8966  {
8967  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8968  {
8969  bufferImageGranularityConflict = true;
8970  break;
8971  }
8972  }
8973  else
8974  // Already on previous page.
8975  break;
8976  }
8977  if(bufferImageGranularityConflict)
8978  {
8979  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8980  }
8981  }
8982 
8983  pAllocationRequest->itemsToMakeLostCount = 0;
8984  pAllocationRequest->sumItemSize = 0;
8985  size_t index1st = m_1stNullItemsBeginCount;
8986 
8987  if(canMakeOtherLost)
8988  {
8989  while(index1st < suballocations1st.size() &&
8990  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8991  {
8992  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8993  const VmaSuballocation& suballoc = suballocations1st[index1st];
8994  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8995  {
8996  // No problem.
8997  }
8998  else
8999  {
9000  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9001  if(suballoc.hAllocation->CanBecomeLost() &&
9002  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9003  {
9004  ++pAllocationRequest->itemsToMakeLostCount;
9005  pAllocationRequest->sumItemSize += suballoc.size;
9006  }
9007  else
9008  {
9009  return false;
9010  }
9011  }
9012  ++index1st;
9013  }
9014 
9015  // Check next suballocations for BufferImageGranularity conflicts.
9016  // If conflict exists, we must mark more allocations lost or fail.
9017  if(bufferImageGranularity > 1)
9018  {
9019  while(index1st < suballocations1st.size())
9020  {
9021  const VmaSuballocation& suballoc = suballocations1st[index1st];
9022  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9023  {
9024  if(suballoc.hAllocation != VK_NULL_HANDLE)
9025  {
9026  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9027  if(suballoc.hAllocation->CanBecomeLost() &&
9028  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9029  {
9030  ++pAllocationRequest->itemsToMakeLostCount;
9031  pAllocationRequest->sumItemSize += suballoc.size;
9032  }
9033  else
9034  {
9035  return false;
9036  }
9037  }
9038  }
9039  else
9040  {
9041  // Already on next page.
9042  break;
9043  }
9044  ++index1st;
9045  }
9046  }
9047  }
9048 
9049  // There is enough free space at the end after alignment.
9050  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9051  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9052  {
9053  // Check next suballocations for BufferImageGranularity conflicts.
9054  // If conflict exists, allocation cannot be made here.
9055  if(bufferImageGranularity > 1)
9056  {
9057  for(size_t nextSuballocIndex = index1st;
9058  nextSuballocIndex < suballocations1st.size();
9059  nextSuballocIndex++)
9060  {
9061  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9062  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9063  {
9064  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9065  {
9066  return false;
9067  }
9068  }
9069  else
9070  {
9071  // Already on next page.
9072  break;
9073  }
9074  }
9075  }
9076 
9077  // All tests passed: Success.
9078  pAllocationRequest->offset = resultOffset;
9079  pAllocationRequest->sumFreeSize =
9080  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9081  - resultBaseOffset
9082  - pAllocationRequest->sumItemSize;
9083  // pAllocationRequest->item unused.
9084  return true;
9085  }
9086  }
9087  }
9088 
9089  return false;
9090 }
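// Illustrative sketch, not part of the library: the offset arithmetic used
// above with hypothetical numbers. VmaAlignUp/VmaAlignDown are the helpers
// defined earlier in this file; VMA_DEBUG_MARGIN is assumed to be 4 here.
#if 0
static void ExampleOffsetMath()
{
    // Lower address (end of 1st vector): previous allocation ends at 13,
    // margin 4, required alignment 8: round the start up.
    const VkDeviceSize lower = VmaAlignUp<VkDeviceSize>(13 + 4, 8);        // 24
    // Upper address (2nd stack): stack top at 90, allocSize 20, margin 4,
    // alignment 8: start from 90 - 20, subtract the margin, round down.
    const VkDeviceSize upper = VmaAlignDown<VkDeviceSize>(90 - 20 - 4, 8); // 64
    (void)lower; (void)upper;
}
#endif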
9091 
9092 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9093  uint32_t currentFrameIndex,
9094  uint32_t frameInUseCount,
9095  VmaAllocationRequest* pAllocationRequest)
9096 {
9097  if(pAllocationRequest->itemsToMakeLostCount == 0)
9098  {
9099  return true;
9100  }
9101 
9102  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9103 
9104  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9105  size_t index1st = m_1stNullItemsBeginCount;
9106  size_t madeLostCount = 0;
9107  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9108  {
9109  VMA_ASSERT(index1st < suballocations1st.size());
9110  VmaSuballocation& suballoc = suballocations1st[index1st];
9111  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9112  {
9113  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9114  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
9115  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9116  {
9117  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9118  suballoc.hAllocation = VK_NULL_HANDLE;
9119  m_SumFreeSize += suballoc.size;
9120  ++m_1stNullItemsMiddleCount;
9121  ++madeLostCount;
9122  }
9123  else
9124  {
9125  return false;
9126  }
9127  }
9128  ++index1st;
9129  }
9130 
9131  CleanupAfterFree();
9132  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
9133 
9134  return true;
9135 }
9136 
9137 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9138 {
9139  uint32_t lostAllocationCount = 0;
9140 
9141  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9142  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9143  {
9144  VmaSuballocation& suballoc = suballocations1st[i];
9145  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9146  suballoc.hAllocation->CanBecomeLost() &&
9147  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9148  {
9149  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9150  suballoc.hAllocation = VK_NULL_HANDLE;
9151  ++m_1stNullItemsMiddleCount;
9152  m_SumFreeSize += suballoc.size;
9153  ++lostAllocationCount;
9154  }
9155  }
9156 
9157  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9158  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9159  {
9160  VmaSuballocation& suballoc = suballocations2nd[i];
9161  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9162  suballoc.hAllocation->CanBecomeLost() &&
9163  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9164  {
9165  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9166  suballoc.hAllocation = VK_NULL_HANDLE;
9167  ++m_2ndNullItemsCount;
9168  ++lostAllocationCount;
9169  }
9170  }
9171 
9172  if(lostAllocationCount)
9173  {
9174  CleanupAfterFree();
9175  }
9176 
9177  return lostAllocationCount;
9178 }
9179 
9180 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9181 {
9182  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9183  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9184  {
9185  const VmaSuballocation& suballoc = suballocations1st[i];
9186  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9187  {
9188  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9189  {
9190  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9191  return VK_ERROR_VALIDATION_FAILED_EXT;
9192  }
9193  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9194  {
9195  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9196  return VK_ERROR_VALIDATION_FAILED_EXT;
9197  }
9198  }
9199  }
9200 
9201  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9202  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9203  {
9204  const VmaSuballocation& suballoc = suballocations2nd[i];
9205  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9206  {
9207  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9208  {
9209  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9210  return VK_ERROR_VALIDATION_FAILED_EXT;
9211  }
9212  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9213  {
9214  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9215  return VK_ERROR_VALIDATION_FAILED_EXT;
9216  }
9217  }
9218  }
9219 
9220  return VK_SUCCESS;
9221 }
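// Illustrative diagram, not part of the library: with VMA_DEBUG_MARGIN > 0
// and corruption detection enabled, each allocation is surrounded by margin
// bytes filled with a magic value, which CheckCorruption() verifies:
//
//   ...[margin][allocation: suballoc.offset, suballoc.size][margin]...
//       ^ magic checked at suballoc.offset - VMA_DEBUG_MARGIN
//                                                  ^ magic checked at suballoc.offset + suballoc.size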
9222 
9223 void VmaBlockMetadata_Linear::Alloc(
9224  const VmaAllocationRequest& request,
9225  VmaSuballocationType type,
9226  VkDeviceSize allocSize,
9227  bool upperAddress,
9228  VmaAllocation hAllocation)
9229 {
9230  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9231 
9232  if(upperAddress)
9233  {
9234  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9235  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9236  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9237  suballocations2nd.push_back(newSuballoc);
9238  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9239  }
9240  else
9241  {
9242  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9243 
9244  // First allocation.
9245  if(suballocations1st.empty())
9246  {
9247  suballocations1st.push_back(newSuballoc);
9248  }
9249  else
9250  {
9251  // New allocation at the end of 1st vector.
9252  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9253  {
9254  // Check if it fits before the end of the block.
9255  VMA_ASSERT(request.offset + allocSize <= GetSize());
9256  suballocations1st.push_back(newSuballoc);
9257  }
9258  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
9259  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
9260  {
9261  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9262 
9263  switch(m_2ndVectorMode)
9264  {
9265  case SECOND_VECTOR_EMPTY:
9266  // First allocation from second part ring buffer.
9267  VMA_ASSERT(suballocations2nd.empty());
9268  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
9269  break;
9270  case SECOND_VECTOR_RING_BUFFER:
9271  // 2-part ring buffer is already started.
9272  VMA_ASSERT(!suballocations2nd.empty());
9273  break;
9274  case SECOND_VECTOR_DOUBLE_STACK:
9275  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
9276  break;
9277  default:
9278  VMA_ASSERT(0);
9279  }
9280 
9281  suballocations2nd.push_back(newSuballoc);
9282  }
9283  else
9284  {
9285  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
9286  }
9287  }
9288  }
9289 
9290  m_SumFreeSize -= newSuballoc.size;
9291 }
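// Illustrative diagram, not part of the library: the three placement cases
// handled above, with hypothetical layouts ('*new*' is the incoming item).
//
//   append to 1st vector:  |---1st---|*new*...............|
//   wrap (ring buffer):    |-2nd-|*new*......|----1st-----|
//   upper (double stack):  |---1st---|...........*new*|2nd|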
9292 
9293 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9294 {
9295  FreeAtOffset(allocation->GetOffset());
9296 }
9297 
9298 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
9299 {
9300  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9301  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9302 
9303  if(!suballocations1st.empty())
9304  {
9305  // If it's the first live allocation in 1st vector: mark it as free and grow the empty region at the beginning.
9306  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9307  if(firstSuballoc.offset == offset)
9308  {
9309  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9310  firstSuballoc.hAllocation = VK_NULL_HANDLE;
9311  m_SumFreeSize += firstSuballoc.size;
9312  ++m_1stNullItemsBeginCount;
9313  CleanupAfterFree();
9314  return;
9315  }
9316  }
9317 
9318  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
9319  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
9320  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9321  {
9322  VmaSuballocation& lastSuballoc = suballocations2nd.back();
9323  if(lastSuballoc.offset == offset)
9324  {
9325  m_SumFreeSize += lastSuballoc.size;
9326  suballocations2nd.pop_back();
9327  CleanupAfterFree();
9328  return;
9329  }
9330  }
9331  // Last allocation in 1st vector.
9332  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
9333  {
9334  VmaSuballocation& lastSuballoc = suballocations1st.back();
9335  if(lastSuballoc.offset == offset)
9336  {
9337  m_SumFreeSize += lastSuballoc.size;
9338  suballocations1st.pop_back();
9339  CleanupAfterFree();
9340  return;
9341  }
9342  }
9343 
9344  // Item from the middle of 1st vector.
9345  {
9346  VmaSuballocation refSuballoc;
9347  refSuballoc.offset = offset;
9348  // The rest of the members intentionally stays uninitialized for better performance.
9349  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
9350  suballocations1st.begin() + m_1stNullItemsBeginCount,
9351  suballocations1st.end(),
9352  refSuballoc);
9353  if(it != suballocations1st.end())
9354  {
9355  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9356  it->hAllocation = VK_NULL_HANDLE;
9357  ++m_1stNullItemsMiddleCount;
9358  m_SumFreeSize += it->size;
9359  CleanupAfterFree();
9360  return;
9361  }
9362  }
9363 
9364  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
9365  {
9366  // Item from the middle of 2nd vector.
9367  VmaSuballocation refSuballoc;
9368  refSuballoc.offset = offset;
9369  // The rest of the members intentionally stays uninitialized for better performance.
9370  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
9371  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
9372  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
9373  if(it != suballocations2nd.end())
9374  {
9375  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9376  it->hAllocation = VK_NULL_HANDLE;
9377  ++m_2ndNullItemsCount;
9378  m_SumFreeSize += it->size;
9379  CleanupAfterFree();
9380  return;
9381  }
9382  }
9383 
9384  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
9385 }
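// Illustrative sketch, not part of the library: the sorted-by-offset lookup
// used above, in miniature. VmaVectorFindSorted does a binary search and
// returns end() when no element with the given offset exists; the local
// typedef below is introduced only for this sketch.
#if 0
typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > ExampleSuballocVec;
static void ExampleFindByOffset(ExampleSuballocVec& suballocs, VkDeviceSize offset)
{
    VmaSuballocation ref;
    ref.offset = offset; // only the offset matters for the comparison
    ExampleSuballocVec::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
        suballocs.begin(), suballocs.end(), ref);
    if(it != suballocs.end())
    {
        // Found the suballocation that starts exactly at `offset`.
    }
}
#endif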
9386 
9387 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9388 {
9389  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9390  const size_t suballocCount = AccessSuballocations1st().size();
9391  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9392 }
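// Worked example, not part of the library: the heuristic above compacts when
// null (freed) items outnumber live items 3:2 and the vector is non-trivial.
// E.g. with 100 suballocations of which 61 are null:
//   61 * 2 = 122 >= (100 - 61) * 3 = 117  -> compact.
// With 60 null: 120 >= 120 -> compact (boundary case).
// With 59 null: 118 <  123 -> keep as-is.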
9393 
9394 void VmaBlockMetadata_Linear::CleanupAfterFree()
9395 {
9396  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9397  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9398 
9399  if(IsEmpty())
9400  {
9401  suballocations1st.clear();
9402  suballocations2nd.clear();
9403  m_1stNullItemsBeginCount = 0;
9404  m_1stNullItemsMiddleCount = 0;
9405  m_2ndNullItemsCount = 0;
9406  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9407  }
9408  else
9409  {
9410  const size_t suballoc1stCount = suballocations1st.size();
9411  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9412  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
9413 
9414  // Find more null items at the beginning of 1st vector.
9415  while(m_1stNullItemsBeginCount < suballoc1stCount &&
9416  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9417  {
9418  ++m_1stNullItemsBeginCount;
9419  --m_1stNullItemsMiddleCount;
9420  }
9421 
9422  // Find more null items at the end of 1st vector.
9423  while(m_1stNullItemsMiddleCount > 0 &&
9424  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
9425  {
9426  --m_1stNullItemsMiddleCount;
9427  suballocations1st.pop_back();
9428  }
9429 
9430  // Find more null items at the end of 2nd vector.
9431  while(m_2ndNullItemsCount > 0 &&
9432  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
9433  {
9434  --m_2ndNullItemsCount;
9435  suballocations2nd.pop_back();
9436  }
9437 
9438  if(ShouldCompact1st())
9439  {
9440  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
9441  size_t srcIndex = m_1stNullItemsBeginCount;
9442  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
9443  {
9444  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
9445  {
9446  ++srcIndex;
9447  }
9448  if(dstIndex != srcIndex)
9449  {
9450  suballocations1st[dstIndex] = suballocations1st[srcIndex];
9451  }
9452  ++srcIndex;
9453  }
9454  suballocations1st.resize(nonNullItemCount);
9455  m_1stNullItemsBeginCount = 0;
9456  m_1stNullItemsMiddleCount = 0;
9457  }
9458 
9459  // 2nd vector became empty.
9460  if(suballocations2nd.empty())
9461  {
9462  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9463  }
9464 
9465  // 1st vector became empty.
9466  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
9467  {
9468  suballocations1st.clear();
9469  m_1stNullItemsBeginCount = 0;
9470 
9471  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9472  {
9473  // Swap 1st with 2nd. Now 2nd is empty.
9474  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9475  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
9476  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
9477  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9478  {
9479  ++m_1stNullItemsBeginCount;
9480  --m_1stNullItemsMiddleCount;
9481  }
9482  m_2ndNullItemsCount = 0;
9483  m_1stVectorIndex ^= 1;
9484  }
9485  }
9486  }
9487 
9488  VMA_HEAVY_ASSERT(Validate());
9489 }
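// Illustrative example, not part of the library: compaction in miniature,
// assuming the ShouldCompact1st() threshold were met. With N = null item,
// A/B = live items, and m_1stNullItemsBeginCount == 2:
//
//   before: [N, N, A, N, B]   (5 items; 2 null at the beginning, 1 in the middle)
//   after:  [A, B]            (both null counters reset to 0)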
9490 
9491 
9492 ////////////////////////////////////////////////////////////////////////////////
9493 // class VmaBlockMetadata_Buddy
9494 
9495 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
9496  VmaBlockMetadata(hAllocator),
9497  m_Root(VMA_NULL),
9498  m_AllocationCount(0),
9499  m_FreeCount(1),
9500  m_SumFreeSize(0)
9501 {
9502  memset(m_FreeList, 0, sizeof(m_FreeList));
9503 }
9504 
9505 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9506 {
9507  DeleteNode(m_Root);
9508 }
9509 
9510 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9511 {
9512  VmaBlockMetadata::Init(size);
9513 
9514  m_UsableSize = VmaPrevPow2(size);
9515  m_SumFreeSize = m_UsableSize;
9516 
9517  // Calculate m_LevelCount.
9518  m_LevelCount = 1;
9519  while(m_LevelCount < MAX_LEVELS &&
9520  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
9521  {
9522  ++m_LevelCount;
9523  }
9524 
9525  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
9526  rootNode->offset = 0;
9527  rootNode->type = Node::TYPE_FREE;
9528  rootNode->parent = VMA_NULL;
9529  rootNode->buddy = VMA_NULL;
9530 
9531  m_Root = rootNode;
9532  AddToFreeListFront(0, rootNode);
9533 }
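// Illustrative sketch, not part of the library: the level math above with a
// hypothetical block size. VmaPrevPow2 rounds down to a power of two, and
// each deeper level halves the node size until MIN_NODE_SIZE is reached.
#if 0
static void ExampleBuddyLevels()
{
    const VkDeviceSize size = 1000;
    const VkDeviceSize usable = VmaPrevPow2(size); // 512; remaining 488 bytes are unusable
    // Node sizes per level: level 0: 512, level 1: 256, level 2: 128, ...
    (void)usable;
}
#endif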
9534 
9535 bool VmaBlockMetadata_Buddy::Validate() const
9536 {
9537  // Validate tree.
9538  ValidationContext ctx;
9539  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9540  {
9541  VMA_VALIDATE(false && "ValidateNode failed.");
9542  }
9543  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9544  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9545 
9546  // Validate free node lists.
9547  for(uint32_t level = 0; level < m_LevelCount; ++level)
9548  {
9549  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9550  m_FreeList[level].front->free.prev == VMA_NULL);
9551 
9552  for(Node* node = m_FreeList[level].front;
9553  node != VMA_NULL;
9554  node = node->free.next)
9555  {
9556  VMA_VALIDATE(node->type == Node::TYPE_FREE);
9557 
9558  if(node->free.next == VMA_NULL)
9559  {
9560  VMA_VALIDATE(m_FreeList[level].back == node);
9561  }
9562  else
9563  {
9564  VMA_VALIDATE(node->free.next->free.prev == node);
9565  }
9566  }
9567  }
9568 
9569  // Validate that free lists at higher levels are empty.
9570  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9571  {
9572  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9573  }
9574 
9575  return true;
9576 }
9577 
9578 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9579 {
9580  for(uint32_t level = 0; level < m_LevelCount; ++level)
9581  {
9582  if(m_FreeList[level].front != VMA_NULL)
9583  {
9584  return LevelToNodeSize(level);
9585  }
9586  }
9587  return 0;
9588 }
9589 
9590 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9591 {
9592  const VkDeviceSize unusableSize = GetUnusableSize();
9593 
9594  outInfo.blockCount = 1;
9595 
9596  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9597  outInfo.usedBytes = outInfo.unusedBytes = 0;
9598 
9599  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9600  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9601  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9602 
9603  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
9604 
9605  if(unusableSize > 0)
9606  {
9607  ++outInfo.unusedRangeCount;
9608  outInfo.unusedBytes += unusableSize;
9609  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
9610  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
9611  }
9612 }
9613 
9614 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9615 {
9616  const VkDeviceSize unusableSize = GetUnusableSize();
9617 
9618  inoutStats.size += GetSize();
9619  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9620  inoutStats.allocationCount += m_AllocationCount;
9621  inoutStats.unusedRangeCount += m_FreeCount;
9622  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9623 
9624  if(unusableSize > 0)
9625  {
9626  ++inoutStats.unusedRangeCount;
9627  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9628  }
9629 }
9630 
9631 #if VMA_STATS_STRING_ENABLED
9632 
9633 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9634 {
9635  // TODO optimize
9636  VmaStatInfo stat;
9637  CalcAllocationStatInfo(stat);
9638 
9639  PrintDetailedMap_Begin(
9640  json,
9641  stat.unusedBytes,
9642  stat.allocationCount,
9643  stat.unusedRangeCount);
9644 
9645  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9646 
9647  const VkDeviceSize unusableSize = GetUnusableSize();
9648  if(unusableSize > 0)
9649  {
9650  PrintDetailedMap_UnusedRange(json,
9651  m_UsableSize, // offset
9652  unusableSize); // size
9653  }
9654 
9655  PrintDetailedMap_End(json);
9656 }
9657 
9658 #endif // #if VMA_STATS_STRING_ENABLED
9659 
9660 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9661  uint32_t currentFrameIndex,
9662  uint32_t frameInUseCount,
9663  VkDeviceSize bufferImageGranularity,
9664  VkDeviceSize allocSize,
9665  VkDeviceSize allocAlignment,
9666  bool upperAddress,
9667  VmaSuballocationType allocType,
9668  bool canMakeOtherLost,
9669  uint32_t strategy,
9670  VmaAllocationRequest* pAllocationRequest)
9671 {
9672  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9673 
9674  // Simple way to respect bufferImageGranularity. May be optimized some day.
9675  // Whenever the allocation might turn out to be an OPTIMAL image, raise its size and alignment to bufferImageGranularity.
9676  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9677  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9678  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9679  {
9680  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9681  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9682  }
9683 
9684  if(allocSize > m_UsableSize)
9685  {
9686  return false;
9687  }
9688 
9689  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9690  for(uint32_t level = targetLevel + 1; level--; )
9691  {
9692  for(Node* freeNode = m_FreeList[level].front;
9693  freeNode != VMA_NULL;
9694  freeNode = freeNode->free.next)
9695  {
9696  if(freeNode->offset % allocAlignment == 0)
9697  {
9698  pAllocationRequest->offset = freeNode->offset;
9699  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9700  pAllocationRequest->sumItemSize = 0;
9701  pAllocationRequest->itemsToMakeLostCount = 0;
9702  pAllocationRequest->customData = (void*)(uintptr_t)level;
9703  return true;
9704  }
9705  }
9706  }
9707 
9708  return false;
9709 }
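// A note on the search order above: "for(uint32_t level = targetLevel + 1; level--; )"
// visits levels targetLevel, targetLevel - 1, ..., 0, i.e. it first tries free
// nodes of exactly the requested size class and then progressively larger ones,
// which Alloc() later splits back down to targetLevel. Since every node at a
// given level has the same size, the only per-node check needed here is the
// alignment of its offset.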
9710 
9711 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9712  uint32_t currentFrameIndex,
9713  uint32_t frameInUseCount,
9714  VmaAllocationRequest* pAllocationRequest)
9715 {
9716  /*
9717  Lost allocations are not supported in buddy allocator at the moment.
9718  Support might be added in the future.
9719  */
9720  return pAllocationRequest->itemsToMakeLostCount == 0;
9721 }
9722 
9723 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9724 {
9725  /*
9726  Lost allocations are not supported in buddy allocator at the moment.
9727  Support might be added in the future.
9728  */
9729  return 0;
9730 }
9731 
9732 void VmaBlockMetadata_Buddy::Alloc(
9733  const VmaAllocationRequest& request,
9734  VmaSuballocationType type,
9735  VkDeviceSize allocSize,
9736  bool upperAddress,
9737  VmaAllocation hAllocation)
9738 {
9739  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9740  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9741 
9742  Node* currNode = m_FreeList[currLevel].front;
9743  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9744  while(currNode->offset != request.offset)
9745  {
9746  currNode = currNode->free.next;
9747  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9748  }
9749 
9750  // Go down, splitting free nodes.
9751  while(currLevel < targetLevel)
9752  {
9753  // currNode is already first free node at currLevel.
9754  // Remove it from list of free nodes at this currLevel.
9755  RemoveFromFreeList(currLevel, currNode);
9756 
9757  const uint32_t childrenLevel = currLevel + 1;
9758 
9759  // Create two free sub-nodes.
9760  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
9761  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
9762 
9763  leftChild->offset = currNode->offset;
9764  leftChild->type = Node::TYPE_FREE;
9765  leftChild->parent = currNode;
9766  leftChild->buddy = rightChild;
9767 
9768  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9769  rightChild->type = Node::TYPE_FREE;
9770  rightChild->parent = currNode;
9771  rightChild->buddy = leftChild;
9772 
9773  // Convert current currNode to split type.
9774  currNode->type = Node::TYPE_SPLIT;
9775  currNode->split.leftChild = leftChild;
9776 
9777  // Add child nodes to free list. Order is important!
9778  AddToFreeListFront(childrenLevel, rightChild);
9779  AddToFreeListFront(childrenLevel, leftChild);
9780 
9781  ++m_FreeCount;
9782  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
9783  ++currLevel;
9784  currNode = m_FreeList[currLevel].front;
9785 
9786  /*
9787  We can be sure that currNode, as the left child of a node previously split,
9788  also fulfills the alignment requirement.
9789  */
9790  }
9791 
9792  // Remove from free list.
9793  VMA_ASSERT(currLevel == targetLevel &&
9794  currNode != VMA_NULL &&
9795  currNode->type == Node::TYPE_FREE);
9796  RemoveFromFreeList(currLevel, currNode);
9797 
9798  // Convert to allocation node.
9799  currNode->type = Node::TYPE_ALLOCATION;
9800  currNode->allocation.alloc = hAllocation;
9801 
9802  ++m_AllocationCount;
9803  --m_FreeCount;
9804  m_SumFreeSize -= allocSize;
9805 }
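// Worked example (illustrative, hypothetical sizes): allocating 10 units from
// an empty block of usable size 64 gives targetLevel = 2 (node size 16). The
// free root (level 0, size 64) is split twice:
//   64 -> 32 + 32   (right 32-unit child stays free)
//   32 -> 16 + 16   (right 16-unit child stays free)
// The left child is taken each time, so the allocation lands at offset 0 in a
// 16-unit node. m_SumFreeSize drops only by allocSize = 10; the node's
// remaining 6 units stay accounted as free but cannot be reused until the
// whole node is freed.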
9806 
9807 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9808 {
9809  if(node->type == Node::TYPE_SPLIT)
9810  {
9811  DeleteNode(node->split.leftChild->buddy);
9812  DeleteNode(node->split.leftChild);
9813  }
9814 
9815  vma_delete(GetAllocationCallbacks(), node);
9816 }
9817 
9818 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9819 {
9820  VMA_VALIDATE(level < m_LevelCount);
9821  VMA_VALIDATE(curr->parent == parent);
9822  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9823  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9824  switch(curr->type)
9825  {
9826  case Node::TYPE_FREE:
9827  // curr->free.prev, next are validated separately.
9828  ctx.calculatedSumFreeSize += levelNodeSize;
9829  ++ctx.calculatedFreeCount;
9830  break;
9831  case Node::TYPE_ALLOCATION:
9832  ++ctx.calculatedAllocationCount;
9833  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
9834  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9835  break;
9836  case Node::TYPE_SPLIT:
9837  {
9838  const uint32_t childrenLevel = level + 1;
9839  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9840  const Node* const leftChild = curr->split.leftChild;
9841  VMA_VALIDATE(leftChild != VMA_NULL);
9842  VMA_VALIDATE(leftChild->offset == curr->offset);
9843  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9844  {
9845  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9846  }
9847  const Node* const rightChild = leftChild->buddy;
9848  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9849  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9850  {
9851  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9852  }
9853  }
9854  break;
9855  default:
9856  return false;
9857  }
9858 
9859  return true;
9860 }
9861 
9862 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9863 {
9864  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
9865  uint32_t level = 0;
9866  VkDeviceSize currLevelNodeSize = m_UsableSize;
9867  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9868  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9869  {
9870  ++level;
9871  currLevelNodeSize = nextLevelNodeSize;
9872  nextLevelNodeSize = currLevelNodeSize >> 1;
9873  }
9874  return level;
9875 }
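// Equivalently (illustrative): LevelToNodeSize(level) == m_UsableSize >> level,
// so this returns the deepest level whose node size still fits allocSize.
// Example with m_UsableSize = 256 and allocSize = 40:
//   40 <= 128 -> level 1; 40 <= 64 -> level 2; 40 > 32 -> stop.
// Result: level 2, node size 64.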
9876 
9877 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9878 {
9879  // Find node and level.
9880  Node* node = m_Root;
9881  VkDeviceSize nodeOffset = 0;
9882  uint32_t level = 0;
9883  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9884  while(node->type == Node::TYPE_SPLIT)
9885  {
9886  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9887  if(offset < nodeOffset + nextLevelSize)
9888  {
9889  node = node->split.leftChild;
9890  }
9891  else
9892  {
9893  node = node->split.leftChild->buddy;
9894  nodeOffset += nextLevelSize;
9895  }
9896  ++level;
9897  levelNodeSize = nextLevelSize;
9898  }
9899 
9900  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9901  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9902 
9903  ++m_FreeCount;
9904  --m_AllocationCount;
9905  m_SumFreeSize += alloc->GetSize();
9906 
9907  node->type = Node::TYPE_FREE;
9908 
9909  // Join free nodes if possible.
9910  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9911  {
9912  RemoveFromFreeList(level, node->buddy);
9913  Node* const parent = node->parent;
9914 
9915  vma_delete(GetAllocationCallbacks(), node->buddy);
9916  vma_delete(GetAllocationCallbacks(), node);
9917  parent->type = Node::TYPE_FREE;
9918 
9919  node = parent;
9920  --level;
9921  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9922  --m_FreeCount;
9923  }
9924 
9925  AddToFreeListFront(level, node);
9926 }
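// Continuing the worked example given after Alloc() above (illustrative):
// freeing the 16-unit node at offset 0 finds its buddy at offset 16 free, so
// both children are deleted and their 32-unit parent becomes free; that
// parent's buddy at offset 32 is also free, so the merge repeats and the block
// collapses back to a single free 64-unit root on m_FreeList[0].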
9927 
9928 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9929 {
9930  switch(node->type)
9931  {
9932  case Node::TYPE_FREE:
9933  ++outInfo.unusedRangeCount;
9934  outInfo.unusedBytes += levelNodeSize;
9935  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9936  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
9937  break;
9938  case Node::TYPE_ALLOCATION:
9939  {
9940  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9941  ++outInfo.allocationCount;
9942  outInfo.usedBytes += allocSize;
9943  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
9944  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
9945 
9946  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9947  if(unusedRangeSize > 0)
9948  {
9949  ++outInfo.unusedRangeCount;
9950  outInfo.unusedBytes += unusedRangeSize;
9951  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9952  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9953  }
9954  }
9955  break;
9956  case Node::TYPE_SPLIT:
9957  {
9958  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9959  const Node* const leftChild = node->split.leftChild;
9960  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9961  const Node* const rightChild = leftChild->buddy;
9962  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9963  }
9964  break;
9965  default:
9966  VMA_ASSERT(0);
9967  }
9968 }
9969 
9970 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9971 {
9972  VMA_ASSERT(node->type == Node::TYPE_FREE);
9973 
9974  // List is empty.
9975  Node* const frontNode = m_FreeList[level].front;
9976  if(frontNode == VMA_NULL)
9977  {
9978  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9979  node->free.prev = node->free.next = VMA_NULL;
9980  m_FreeList[level].front = m_FreeList[level].back = node;
9981  }
9982  else
9983  {
9984  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9985  node->free.prev = VMA_NULL;
9986  node->free.next = frontNode;
9987  frontNode->free.prev = node;
9988  m_FreeList[level].front = node;
9989  }
9990 }
9991 
9992 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9993 {
9994  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9995 
9996  // It is at the front.
9997  if(node->free.prev == VMA_NULL)
9998  {
9999  VMA_ASSERT(m_FreeList[level].front == node);
10000  m_FreeList[level].front = node->free.next;
10001  }
10002  else
10003  {
10004  Node* const prevFreeNode = node->free.prev;
10005  VMA_ASSERT(prevFreeNode->free.next == node);
10006  prevFreeNode->free.next = node->free.next;
10007  }
10008 
10009  // It is at the back.
10010  if(node->free.next == VMA_NULL)
10011  {
10012  VMA_ASSERT(m_FreeList[level].back == node);
10013  m_FreeList[level].back = node->free.prev;
10014  }
10015  else
10016  {
10017  Node* const nextFreeNode = node->free.next;
10018  VMA_ASSERT(nextFreeNode->free.prev == node);
10019  nextFreeNode->free.prev = node->free.prev;
10020  }
10021 }
10022 
10023 #if VMA_STATS_STRING_ENABLED
10024 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10025 {
10026  switch(node->type)
10027  {
10028  case Node::TYPE_FREE:
10029  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10030  break;
10031  case Node::TYPE_ALLOCATION:
10032  {
10033  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10034  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10035  if(allocSize < levelNodeSize)
10036  {
10037  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10038  }
10039  }
10040  break;
10041  case Node::TYPE_SPLIT:
10042  {
10043  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10044  const Node* const leftChild = node->split.leftChild;
10045  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10046  const Node* const rightChild = leftChild->buddy;
10047  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10048  }
10049  break;
10050  default:
10051  VMA_ASSERT(0);
10052  }
10053 }
10054 #endif // #if VMA_STATS_STRING_ENABLED
10055 
10056 
10057 ////////////////////////////////////////////////////////////////////////////////
10058 // class VmaDeviceMemoryBlock
10059 
10060 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10061  m_pMetadata(VMA_NULL),
10062  m_MemoryTypeIndex(UINT32_MAX),
10063  m_Id(0),
10064  m_hMemory(VK_NULL_HANDLE),
10065  m_MapCount(0),
10066  m_pMappedData(VMA_NULL)
10067 {
10068 }
10069 
10070 void VmaDeviceMemoryBlock::Init(
10071  VmaAllocator hAllocator,
10072  uint32_t newMemoryTypeIndex,
10073  VkDeviceMemory newMemory,
10074  VkDeviceSize newSize,
10075  uint32_t id,
10076  uint32_t algorithm)
10077 {
10078  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10079 
10080  m_MemoryTypeIndex = newMemoryTypeIndex;
10081  m_Id = id;
10082  m_hMemory = newMemory;
10083 
10084  switch(algorithm)
10085  {
10086  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10087  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10088  break;
10089  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10090  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10091  break;
10092  default:
10093  VMA_ASSERT(0);
10094  // Fall-through.
10095  case 0:
10096  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10097  }
10098  m_pMetadata->Init(newSize);
10099 }
10100 
10101 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10102 {
10103  // This is the most important assert in the entire library.
10104  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10105  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10106 
10107  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10108  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10109  m_hMemory = VK_NULL_HANDLE;
10110 
10111  vma_delete(allocator, m_pMetadata);
10112  m_pMetadata = VMA_NULL;
10113 }
10114 
10115 bool VmaDeviceMemoryBlock::Validate() const
10116 {
10117  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
10118  (m_pMetadata->GetSize() != 0));
10119 
10120  return m_pMetadata->Validate();
10121 }
10122 
10123 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
10124 {
10125  void* pData = nullptr;
10126  VkResult res = Map(hAllocator, 1, &pData);
10127  if(res != VK_SUCCESS)
10128  {
10129  return res;
10130  }
10131 
10132  res = m_pMetadata->CheckCorruption(pData);
10133 
10134  Unmap(hAllocator, 1);
10135 
10136  return res;
10137 }
10138 
10139 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10140 {
10141  if(count == 0)
10142  {
10143  return VK_SUCCESS;
10144  }
10145 
10146  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10147  if(m_MapCount != 0)
10148  {
10149  m_MapCount += count;
10150  VMA_ASSERT(m_pMappedData != VMA_NULL);
10151  if(ppData != VMA_NULL)
10152  {
10153  *ppData = m_pMappedData;
10154  }
10155  return VK_SUCCESS;
10156  }
10157  else
10158  {
10159  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10160  hAllocator->m_hDevice,
10161  m_hMemory,
10162  0, // offset
10163  VK_WHOLE_SIZE,
10164  0, // flags
10165  &m_pMappedData);
10166  if(result == VK_SUCCESS)
10167  {
10168  if(ppData != VMA_NULL)
10169  {
10170  *ppData = m_pMappedData;
10171  }
10172  m_MapCount = count;
10173  }
10174  return result;
10175  }
10176 }
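// Usage sketch of this reference counting through the public API (illustrative;
// assumes alloc1 and alloc2 were suballocated from the same VkDeviceMemory
// block):
//
//   void* p1; void* p2;
//   vmaMapMemory(allocator, alloc1, &p1); // vkMapMemory runs, m_MapCount = 1
//   vmaMapMemory(allocator, alloc2, &p2); // same block: m_MapCount = 2, mapping reused
//   vmaUnmapMemory(allocator, alloc2);    // m_MapCount = 1, memory stays mapped
//   vmaUnmapMemory(allocator, alloc1);    // m_MapCount = 0, vkUnmapMemory runs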
10177 
10178 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10179 {
10180  if(count == 0)
10181  {
10182  return;
10183  }
10184 
10185  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10186  if(m_MapCount >= count)
10187  {
10188  m_MapCount -= count;
10189  if(m_MapCount == 0)
10190  {
10191  m_pMappedData = VMA_NULL;
10192  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10193  }
10194  }
10195  else
10196  {
10197  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10198  }
10199 }
10200 
10201 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10202 {
10203  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10204  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10205 
10206  void* pData;
10207  VkResult res = Map(hAllocator, 1, &pData);
10208  if(res != VK_SUCCESS)
10209  {
10210  return res;
10211  }
10212 
10213  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10214  VmaWriteMagicValue(pData, allocOffset + allocSize);
10215 
10216  Unmap(hAllocator, 1);
10217 
10218  return VK_SUCCESS;
10219 }
10220 
10221 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10222 {
10223  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10224  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10225 
10226  void* pData;
10227  VkResult res = Map(hAllocator, 1, &pData);
10228  if(res != VK_SUCCESS)
10229  {
10230  return res;
10231  }
10232 
10233  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10234  {
10235  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10236  }
10237  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10238  {
10239  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10240  }
10241 
10242  Unmap(hAllocator, 1);
10243 
10244  return VK_SUCCESS;
10245 }
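// Layout guarded by the two functions above (illustrative, assuming
// VMA_DEBUG_MARGIN = 16):
//
//   allocOffset - 16     allocOffset          allocOffset + allocSize
//   | 16 B magic values | ... user data ... | 16 B magic values |
//
// VmaWriteMagicValue fills each margin with a repeating 32-bit pattern and
// VmaValidateMagicValue re-reads it when the allocation is freed, so an
// out-of-bounds write by the application is detected at deallocation time.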
10246 
10247 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10248  const VmaAllocator hAllocator,
10249  const VmaAllocation hAllocation,
10250  VkBuffer hBuffer)
10251 {
10252  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10253  hAllocation->GetBlock() == this);
10254  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10255  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10256  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10257  hAllocator->m_hDevice,
10258  hBuffer,
10259  m_hMemory,
10260  hAllocation->GetOffset());
10261 }
10262 
10263 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10264  const VmaAllocator hAllocator,
10265  const VmaAllocation hAllocation,
10266  VkImage hImage)
10267 {
10268  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10269  hAllocation->GetBlock() == this);
10270  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10271  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10272  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10273  hAllocator->m_hDevice,
10274  hImage,
10275  m_hMemory,
10276  hAllocation->GetOffset());
10277 }
10278 
10279 static void InitStatInfo(VmaStatInfo& outInfo)
10280 {
10281  memset(&outInfo, 0, sizeof(outInfo));
10282  outInfo.allocationSizeMin = UINT64_MAX;
10283  outInfo.unusedRangeSizeMin = UINT64_MAX;
10284 }
10285 
10286 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10287 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10288 {
10289  inoutInfo.blockCount += srcInfo.blockCount;
10290  inoutInfo.allocationCount += srcInfo.allocationCount;
10291  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10292  inoutInfo.usedBytes += srcInfo.usedBytes;
10293  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10294  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10295  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10296  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10297  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10298 }
10299 
10300 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10301 {
10302  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10303  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10304  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10305  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10306 }
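// Example (illustrative, assuming VmaRoundDiv rounds to the nearest integer,
// as its name suggests): usedBytes = 1000 and allocationCount = 6 give
// allocationSizeAvg = (1000 + 3) / 6 = 167, where plain truncation would
// yield 166.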
10307 
10308 VmaPool_T::VmaPool_T(
10309  VmaAllocator hAllocator,
10310  const VmaPoolCreateInfo& createInfo,
10311  VkDeviceSize preferredBlockSize) :
10312  m_BlockVector(
10313  hAllocator,
10314  createInfo.memoryTypeIndex,
10315  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
10316  createInfo.minBlockCount,
10317  createInfo.maxBlockCount,
10318  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
10319  createInfo.frameInUseCount,
10320  true, // isCustomPool
10321  createInfo.blockSize != 0, // explicitBlockSize
10322  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
10323  m_Id(0)
10324 {
10325 }
10326 
10327 VmaPool_T::~VmaPool_T()
10328 {
10329 }
10330 
10331 #if VMA_STATS_STRING_ENABLED
10332 
10333 #endif // #if VMA_STATS_STRING_ENABLED
10334 
10335 VmaBlockVector::VmaBlockVector(
10336  VmaAllocator hAllocator,
10337  uint32_t memoryTypeIndex,
10338  VkDeviceSize preferredBlockSize,
10339  size_t minBlockCount,
10340  size_t maxBlockCount,
10341  VkDeviceSize bufferImageGranularity,
10342  uint32_t frameInUseCount,
10343  bool isCustomPool,
10344  bool explicitBlockSize,
10345  uint32_t algorithm) :
10346  m_hAllocator(hAllocator),
10347  m_MemoryTypeIndex(memoryTypeIndex),
10348  m_PreferredBlockSize(preferredBlockSize),
10349  m_MinBlockCount(minBlockCount),
10350  m_MaxBlockCount(maxBlockCount),
10351  m_BufferImageGranularity(bufferImageGranularity),
10352  m_FrameInUseCount(frameInUseCount),
10353  m_IsCustomPool(isCustomPool),
10354  m_ExplicitBlockSize(explicitBlockSize),
10355  m_Algorithm(algorithm),
10356  m_HasEmptyBlock(false),
10357  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
10358  m_pDefragmentator(VMA_NULL),
10359  m_NextBlockId(0)
10360 {
10361 }
10362 
10363 VmaBlockVector::~VmaBlockVector()
10364 {
10365  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10366 
10367  for(size_t i = m_Blocks.size(); i--; )
10368  {
10369  m_Blocks[i]->Destroy(m_hAllocator);
10370  vma_delete(m_hAllocator, m_Blocks[i]);
10371  }
10372 }
10373 
10374 VkResult VmaBlockVector::CreateMinBlocks()
10375 {
10376  for(size_t i = 0; i < m_MinBlockCount; ++i)
10377  {
10378  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10379  if(res != VK_SUCCESS)
10380  {
10381  return res;
10382  }
10383  }
10384  return VK_SUCCESS;
10385 }
10386 
10387 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10388 {
10389  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10390 
10391  const size_t blockCount = m_Blocks.size();
10392 
10393  pStats->size = 0;
10394  pStats->unusedSize = 0;
10395  pStats->allocationCount = 0;
10396  pStats->unusedRangeCount = 0;
10397  pStats->unusedRangeSizeMax = 0;
10398  pStats->blockCount = blockCount;
10399 
10400  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10401  {
10402  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10403  VMA_ASSERT(pBlock);
10404  VMA_HEAVY_ASSERT(pBlock->Validate());
10405  pBlock->m_pMetadata->AddPoolStats(*pStats);
10406  }
10407 }
10408 
10409 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10410 {
10411  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10412  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10413  (VMA_DEBUG_MARGIN > 0) &&
10414  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10415 }
10416 
10417 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10418 
10419 VkResult VmaBlockVector::Allocate(
10420  VmaPool hCurrentPool,
10421  uint32_t currentFrameIndex,
10422  VkDeviceSize size,
10423  VkDeviceSize alignment,
10424  const VmaAllocationCreateInfo& createInfo,
10425  VmaSuballocationType suballocType,
10426  VmaAllocation* pAllocation)
10427 {
10428  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10429  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10430  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10431  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10432  const bool canCreateNewBlock =
10433  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10434  (m_Blocks.size() < m_MaxBlockCount);
10435  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10436 
10437  // If the linear algorithm is used, canMakeOtherLost is available only when used as a ring buffer,
10438  // which in turn is available only when maxBlockCount = 1.
10439  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10440  {
10441  canMakeOtherLost = false;
10442  }
10443 
10444  // Upper address can only be used with linear allocator and within single memory block.
10445  if(isUpperAddress &&
10446  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10447  {
10448  return VK_ERROR_FEATURE_NOT_PRESENT;
10449  }
10450 
10451  // Validate strategy.
10452  switch(strategy)
10453  {
10454  case 0:
10455  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
10456  break;
10457  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
10458  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
10459  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
10460  break;
10461  default:
10462  return VK_ERROR_FEATURE_NOT_PRESENT;
10463  }
10464 
10465  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
10466  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10467  {
10468  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10469  }
10470 
10471  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10472 
10473  /*
10474  Under certain conditions, this whole section can be skipped as an optimization, so
10475  we move on directly to trying to allocate with canMakeOtherLost. That is the case
10476  e.g. for custom pools with the linear algorithm.
10477  */
10478  if(!canMakeOtherLost || canCreateNewBlock)
10479  {
10480  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10481  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10482  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
10483 
10484  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10485  {
10486  // Use only last block.
10487  if(!m_Blocks.empty())
10488  {
10489  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10490  VMA_ASSERT(pCurrBlock);
10491  VkResult res = AllocateFromBlock(
10492  pCurrBlock,
10493  hCurrentPool,
10494  currentFrameIndex,
10495  size,
10496  alignment,
10497  allocFlagsCopy,
10498  createInfo.pUserData,
10499  suballocType,
10500  strategy,
10501  pAllocation);
10502  if(res == VK_SUCCESS)
10503  {
10504  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10505  return VK_SUCCESS;
10506  }
10507  }
10508  }
10509  else
10510  {
10511  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10512  {
10513  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10514  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10515  {
10516  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10517  VMA_ASSERT(pCurrBlock);
10518  VkResult res = AllocateFromBlock(
10519  pCurrBlock,
10520  hCurrentPool,
10521  currentFrameIndex,
10522  size,
10523  alignment,
10524  allocFlagsCopy,
10525  createInfo.pUserData,
10526  suballocType,
10527  strategy,
10528  pAllocation);
10529  if(res == VK_SUCCESS)
10530  {
10531  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10532  return VK_SUCCESS;
10533  }
10534  }
10535  }
10536  else // WORST_FIT, FIRST_FIT
10537  {
10538  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10539  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10540  {
10541  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10542  VMA_ASSERT(pCurrBlock);
10543  VkResult res = AllocateFromBlock(
10544  pCurrBlock,
10545  hCurrentPool,
10546  currentFrameIndex,
10547  size,
10548  alignment,
10549  allocFlagsCopy,
10550  createInfo.pUserData,
10551  suballocType,
10552  strategy,
10553  pAllocation);
10554  if(res == VK_SUCCESS)
10555  {
10556  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10557  return VK_SUCCESS;
10558  }
10559  }
10560  }
10561  }
10562 
10563  // 2. Try to create new block.
10564  if(canCreateNewBlock)
10565  {
10566  // Calculate optimal size for new block.
10567  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10568  uint32_t newBlockSizeShift = 0;
10569  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10570 
10571  if(!m_ExplicitBlockSize)
10572  {
10573  // Allocate 1/8, 1/4, 1/2 as first blocks.
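// Example (illustrative): with m_PreferredBlockSize = 256 MiB, no existing
// blocks and a 4 MiB request, all three halvings below pass their checks
// (each candidate exceeds maxExistingBlockSize and is >= 2 * size), so the
// first block created is 256 / 8 = 32 MiB; full-size blocks only appear later
// as usage grows.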
10574  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10575  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10576  {
10577  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10578  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10579  {
10580  newBlockSize = smallerNewBlockSize;
10581  ++newBlockSizeShift;
10582  }
10583  else
10584  {
10585  break;
10586  }
10587  }
10588  }
10589 
10590  size_t newBlockIndex = 0;
10591  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10592  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10593  if(!m_ExplicitBlockSize)
10594  {
10595  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10596  {
10597  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10598  if(smallerNewBlockSize >= size)
10599  {
10600  newBlockSize = smallerNewBlockSize;
10601  ++newBlockSizeShift;
10602  res = CreateBlock(newBlockSize, &newBlockIndex);
10603  }
10604  else
10605  {
10606  break;
10607  }
10608  }
10609  }
10610 
10611  if(res == VK_SUCCESS)
10612  {
10613  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10614  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10615 
10616  res = AllocateFromBlock(
10617  pBlock,
10618  hCurrentPool,
10619  currentFrameIndex,
10620  size,
10621  alignment,
10622  allocFlagsCopy,
10623  createInfo.pUserData,
10624  suballocType,
10625  strategy,
10626  pAllocation);
10627  if(res == VK_SUCCESS)
10628  {
10629  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10630  return VK_SUCCESS;
10631  }
10632  else
10633  {
10634  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10635  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10636  }
10637  }
10638  }
10639  }
10640 
10641  // 3. Try to allocate from existing blocks, making other allocations lost.
10642  if(canMakeOtherLost)
10643  {
10644  uint32_t tryIndex = 0;
10645  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10646  {
10647  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10648  VmaAllocationRequest bestRequest = {};
10649  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10650 
10651  // 1. Search existing allocations.
10652  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10653  {
10654  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10655  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10656  {
10657  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10658  VMA_ASSERT(pCurrBlock);
10659  VmaAllocationRequest currRequest = {};
10660  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10661  currentFrameIndex,
10662  m_FrameInUseCount,
10663  m_BufferImageGranularity,
10664  size,
10665  alignment,
10666  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10667  suballocType,
10668  canMakeOtherLost,
10669  strategy,
10670  &currRequest))
10671  {
10672  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10673  if(pBestRequestBlock == VMA_NULL ||
10674  currRequestCost < bestRequestCost)
10675  {
10676  pBestRequestBlock = pCurrBlock;
10677  bestRequest = currRequest;
10678  bestRequestCost = currRequestCost;
10679 
10680  if(bestRequestCost == 0)
10681  {
10682  break;
10683  }
10684  }
10685  }
10686  }
10687  }
10688  else // WORST_FIT, FIRST_FIT
10689  {
10690  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10691  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10692  {
10693  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10694  VMA_ASSERT(pCurrBlock);
10695  VmaAllocationRequest currRequest = {};
10696  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10697  currentFrameIndex,
10698  m_FrameInUseCount,
10699  m_BufferImageGranularity,
10700  size,
10701  alignment,
10702  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10703  suballocType,
10704  canMakeOtherLost,
10705  strategy,
10706  &currRequest))
10707  {
10708  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10709  if(pBestRequestBlock == VMA_NULL ||
10710  currRequestCost < bestRequestCost ||
10711  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10712  {
10713  pBestRequestBlock = pCurrBlock;
10714  bestRequest = currRequest;
10715  bestRequestCost = currRequestCost;
10716 
10717  if(bestRequestCost == 0 ||
10718  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10719  {
10720  break;
10721  }
10722  }
10723  }
10724  }
10725  }
10726 
10727  if(pBestRequestBlock != VMA_NULL)
10728  {
10729  if(mapped)
10730  {
10731  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10732  if(res != VK_SUCCESS)
10733  {
10734  return res;
10735  }
10736  }
10737 
10738  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10739  currentFrameIndex,
10740  m_FrameInUseCount,
10741  &bestRequest))
10742  {
10743  // We no longer have an empty block.
10744  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10745  {
10746  m_HasEmptyBlock = false;
10747  }
10748  // Allocate from this pBlock.
10749  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10750  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10751  (*pAllocation)->InitBlockAllocation(
10752  hCurrentPool,
10753  pBestRequestBlock,
10754  bestRequest.offset,
10755  alignment,
10756  size,
10757  suballocType,
10758  mapped,
10759  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10760  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10761  VMA_DEBUG_LOG(" Returned from existing block");
10762  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10763  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10764  {
10765  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10766  }
10767  if(IsCorruptionDetectionEnabled())
10768  {
10769  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10770  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10771  }
10772  return VK_SUCCESS;
10773  }
10774  // else: Some allocations must have been touched while we are here. Next try.
10775  }
10776  else
10777  {
10778  // Could not find place in any of the blocks - break outer loop.
10779  break;
10780  }
10781  }
10782  /* Maximum number of tries exceeded - a very unlikely event when many other
10783  threads are simultaneously touching allocations, making it impossible to mark
10784  them as lost at the same time as we try to allocate. */
10785  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10786  {
10787  return VK_ERROR_TOO_MANY_OBJECTS;
10788  }
10789  }
10790 
10791  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10792 }
10793 
10794 void VmaBlockVector::Free(
10795  VmaAllocation hAllocation)
10796 {
10797  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10798 
10799  // Scope for lock.
10800  {
10801  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10802 
10803  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10804 
10805  if(IsCorruptionDetectionEnabled())
10806  {
10807  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10808  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10809  }
10810 
10811  if(hAllocation->IsPersistentMap())
10812  {
10813  pBlock->Unmap(m_hAllocator, 1);
10814  }
10815 
10816  pBlock->m_pMetadata->Free(hAllocation);
10817  VMA_HEAVY_ASSERT(pBlock->Validate());
10818 
10819  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
10820 
10821  // pBlock became empty after this deallocation.
10822  if(pBlock->m_pMetadata->IsEmpty())
10823  {
10824  // Already has an empty block. We don't want to have two, so delete this one.
10825  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10826  {
10827  pBlockToDelete = pBlock;
10828  Remove(pBlock);
10829  }
10830  // We now have our first empty block.
10831  else
10832  {
10833  m_HasEmptyBlock = true;
10834  }
10835  }
10836  // pBlock didn't become empty, but we have another empty block - find and free that one.
10837  // (This is optional, heuristics.)
10838  else if(m_HasEmptyBlock)
10839  {
10840  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10841  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10842  {
10843  pBlockToDelete = pLastBlock;
10844  m_Blocks.pop_back();
10845  m_HasEmptyBlock = false;
10846  }
10847  }
10848 
10849  IncrementallySortBlocks();
10850  }
10851 
10852  // Destruction of a free block. Deferred until this point, outside of mutex
10853  // lock, for performance reasons.
10854  if(pBlockToDelete != VMA_NULL)
10855  {
10856  VMA_DEBUG_LOG(" Deleted empty block");
10857  pBlockToDelete->Destroy(m_hAllocator);
10858  vma_delete(m_hAllocator, pBlockToDelete);
10859  }
10860 }
10861 
10862 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10863 {
10864  VkDeviceSize result = 0;
10865  for(size_t i = m_Blocks.size(); i--; )
10866  {
10867  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10868  if(result >= m_PreferredBlockSize)
10869  {
10870  break;
10871  }
10872  }
10873  return result;
10874 }
10875 
10876 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10877 {
10878  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10879  {
10880  if(m_Blocks[blockIndex] == pBlock)
10881  {
10882  VmaVectorRemove(m_Blocks, blockIndex);
10883  return;
10884  }
10885  }
10886  VMA_ASSERT(0);
10887 }
10888 
10889 void VmaBlockVector::IncrementallySortBlocks()
10890 {
10891  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10892  {
10893  // Bubble sort only until first swap.
10894  for(size_t i = 1; i < m_Blocks.size(); ++i)
10895  {
10896  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10897  {
10898  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10899  return;
10900  }
10901  }
10902  }
10903 }
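// Heuristic note: each call performs at most one swap of neighbours, so its
// cost is negligible per allocation/free, while repeated calls keep m_Blocks
// approximately sorted by ascending free space. That is the ordering the
// forward scan for VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT in Allocate()
// relies on.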
10904 
10905 VkResult VmaBlockVector::AllocateFromBlock(
10906  VmaDeviceMemoryBlock* pBlock,
10907  VmaPool hCurrentPool,
10908  uint32_t currentFrameIndex,
10909  VkDeviceSize size,
10910  VkDeviceSize alignment,
10911  VmaAllocationCreateFlags allocFlags,
10912  void* pUserData,
10913  VmaSuballocationType suballocType,
10914  uint32_t strategy,
10915  VmaAllocation* pAllocation)
10916 {
10917  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
10918  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10919  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10920  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10921 
10922  VmaAllocationRequest currRequest = {};
10923  if(pBlock->m_pMetadata->CreateAllocationRequest(
10924  currentFrameIndex,
10925  m_FrameInUseCount,
10926  m_BufferImageGranularity,
10927  size,
10928  alignment,
10929  isUpperAddress,
10930  suballocType,
10931  false, // canMakeOtherLost
10932  strategy,
10933  &currRequest))
10934  {
10935  // Allocate from pBlock.
10936  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
10937 
10938  if(mapped)
10939  {
10940  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
10941  if(res != VK_SUCCESS)
10942  {
10943  return res;
10944  }
10945  }
10946 
10947  // We no longer have an empty block.
10948  if(pBlock->m_pMetadata->IsEmpty())
10949  {
10950  m_HasEmptyBlock = false;
10951  }
10952 
10953  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10954  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
10955  (*pAllocation)->InitBlockAllocation(
10956  hCurrentPool,
10957  pBlock,
10958  currRequest.offset,
10959  alignment,
10960  size,
10961  suballocType,
10962  mapped,
10963  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10964  VMA_HEAVY_ASSERT(pBlock->Validate());
10965  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
10966  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10967  {
10968  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10969  }
10970  if(IsCorruptionDetectionEnabled())
10971  {
10972  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
10973  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10974  }
10975  return VK_SUCCESS;
10976  }
10977  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10978 }
10979 
10980 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10981 {
10982  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10983  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10984  allocInfo.allocationSize = blockSize;
10985  VkDeviceMemory mem = VK_NULL_HANDLE;
10986  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10987  if(res < 0)
10988  {
10989  return res;
10990  }
10991 
10992  // New VkDeviceMemory successfully created.
10993 
10994  // Create new Allocation for it.
10995  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10996  pBlock->Init(
10997  m_hAllocator,
10998  m_MemoryTypeIndex,
10999  mem,
11000  allocInfo.allocationSize,
11001  m_NextBlockId++,
11002  m_Algorithm);
11003 
11004  m_Blocks.push_back(pBlock);
11005  if(pNewBlockIndex != VMA_NULL)
11006  {
11007  *pNewBlockIndex = m_Blocks.size() - 1;
11008  }
11009 
11010  return VK_SUCCESS;
11011 }
11012 
11013 #if VMA_STATS_STRING_ENABLED
11014 
11015 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
11016 {
11017  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11018 
11019  json.BeginObject();
11020 
11021  if(m_IsCustomPool)
11022  {
11023  json.WriteString("MemoryTypeIndex");
11024  json.WriteNumber(m_MemoryTypeIndex);
11025 
11026  json.WriteString("BlockSize");
11027  json.WriteNumber(m_PreferredBlockSize);
11028 
11029  json.WriteString("BlockCount");
11030  json.BeginObject(true);
11031  if(m_MinBlockCount > 0)
11032  {
11033  json.WriteString("Min");
11034  json.WriteNumber((uint64_t)m_MinBlockCount);
11035  }
11036  if(m_MaxBlockCount < SIZE_MAX)
11037  {
11038  json.WriteString("Max");
11039  json.WriteNumber((uint64_t)m_MaxBlockCount);
11040  }
11041  json.WriteString("Cur");
11042  json.WriteNumber((uint64_t)m_Blocks.size());
11043  json.EndObject();
11044 
11045  if(m_FrameInUseCount > 0)
11046  {
11047  json.WriteString("FrameInUseCount");
11048  json.WriteNumber(m_FrameInUseCount);
11049  }
11050 
11051  if(m_Algorithm != 0)
11052  {
11053  json.WriteString("Algorithm");
11054  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
11055  }
11056  }
11057  else
11058  {
11059  json.WriteString("PreferredBlockSize");
11060  json.WriteNumber(m_PreferredBlockSize);
11061  }
11062 
11063  json.WriteString("Blocks");
11064  json.BeginObject();
11065  for(size_t i = 0; i < m_Blocks.size(); ++i)
11066  {
11067  json.BeginString();
11068  json.ContinueString(m_Blocks[i]->GetId());
11069  json.EndString();
11070 
11071  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
11072  }
11073  json.EndObject();
11074 
11075  json.EndObject();
11076 }
11077 
11078 #endif // #if VMA_STATS_STRING_ENABLED
11079 
11080 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
11081  VmaAllocator hAllocator,
11082  uint32_t currentFrameIndex)
11083 {
11084  if(m_pDefragmentator == VMA_NULL)
11085  {
11086  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
11087  hAllocator,
11088  this,
11089  currentFrameIndex);
11090  }
11091 
11092  return m_pDefragmentator;
11093 }
11094 
11095 VkResult VmaBlockVector::Defragment(
11096  VmaDefragmentationStats* pDefragmentationStats,
11097  VkDeviceSize& maxBytesToMove,
11098  uint32_t& maxAllocationsToMove)
11099 {
11100  if(m_pDefragmentator == VMA_NULL)
11101  {
11102  return VK_SUCCESS;
11103  }
11104 
11105  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11106 
11107  // Defragment.
11108  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
11109 
11110  // Accumulate statistics.
11111  if(pDefragmentationStats != VMA_NULL)
11112  {
11113  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
11114  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
11115  pDefragmentationStats->bytesMoved += bytesMoved;
11116  pDefragmentationStats->allocationsMoved += allocationsMoved;
11117  VMA_ASSERT(bytesMoved <= maxBytesToMove);
11118  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
11119  maxBytesToMove -= bytesMoved;
11120  maxAllocationsToMove -= allocationsMoved;
11121  }
11122 
11123  // Free empty blocks.
11124  m_HasEmptyBlock = false;
11125  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11126  {
11127  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11128  if(pBlock->m_pMetadata->IsEmpty())
11129  {
11130  if(m_Blocks.size() > m_MinBlockCount)
11131  {
11132  if(pDefragmentationStats != VMA_NULL)
11133  {
11134  ++pDefragmentationStats->deviceMemoryBlocksFreed;
11135  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
11136  }
11137 
11138  VmaVectorRemove(m_Blocks, blockIndex);
11139  pBlock->Destroy(m_hAllocator);
11140  vma_delete(m_hAllocator, pBlock);
11141  }
11142  else
11143  {
11144  m_HasEmptyBlock = true;
11145  }
11146  }
11147  }
11148 
11149  return result;
11150 }
11151 
11152 void VmaBlockVector::DestroyDefragmentator()
11153 {
11154  if(m_pDefragmentator != VMA_NULL)
11155  {
11156  vma_delete(m_hAllocator, m_pDefragmentator);
11157  m_pDefragmentator = VMA_NULL;
11158  }
11159 }
11160 
11161 void VmaBlockVector::MakePoolAllocationsLost(
11162  uint32_t currentFrameIndex,
11163  size_t* pLostAllocationCount)
11164 {
11165  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11166  size_t lostAllocationCount = 0;
11167  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11168  {
11169  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11170  VMA_ASSERT(pBlock);
11171  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
11172  }
11173  if(pLostAllocationCount != VMA_NULL)
11174  {
11175  *pLostAllocationCount = lostAllocationCount;
11176  }
11177 }
11178 
11179 VkResult VmaBlockVector::CheckCorruption()
11180 {
11181  if(!IsCorruptionDetectionEnabled())
11182  {
11183  return VK_ERROR_FEATURE_NOT_PRESENT;
11184  }
11185 
11186  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11187  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11188  {
11189  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11190  VMA_ASSERT(pBlock);
11191  VkResult res = pBlock->CheckCorruption(m_hAllocator);
11192  if(res != VK_SUCCESS)
11193  {
11194  return res;
11195  }
11196  }
11197  return VK_SUCCESS;
11198 }
11199 
11200 void VmaBlockVector::AddStats(VmaStats* pStats)
11201 {
11202  const uint32_t memTypeIndex = m_MemoryTypeIndex;
11203  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11204 
11205  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11206 
11207  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11208  {
11209  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11210  VMA_ASSERT(pBlock);
11211  VMA_HEAVY_ASSERT(pBlock->Validate());
11212  VmaStatInfo allocationStatInfo;
11213  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11214  VmaAddStatInfo(pStats->total, allocationStatInfo);
11215  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11216  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11217  }
11218 }
11219 
11220 ////////////////////////////////////////////////////////////////////////////////
11221 // VmaDefragmentator members definition
11222 
11223 VmaDefragmentator::VmaDefragmentator(
11224  VmaAllocator hAllocator,
11225  VmaBlockVector* pBlockVector,
11226  uint32_t currentFrameIndex) :
11227  m_hAllocator(hAllocator),
11228  m_pBlockVector(pBlockVector),
11229  m_CurrentFrameIndex(currentFrameIndex),
11230  m_BytesMoved(0),
11231  m_AllocationsMoved(0),
11232  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
11233  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
11234 {
11235  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
11236 }
11237 
11238 VmaDefragmentator::~VmaDefragmentator()
11239 {
11240  for(size_t i = m_Blocks.size(); i--; )
11241  {
11242  vma_delete(m_hAllocator, m_Blocks[i]);
11243  }
11244 }
11245 
11246 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11247 {
11248  AllocationInfo allocInfo;
11249  allocInfo.m_hAllocation = hAlloc;
11250  allocInfo.m_pChanged = pChanged;
11251  m_Allocations.push_back(allocInfo);
11252 }
11253 
11254 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11255 {
11256  // It has already been mapped for defragmentation.
11257  if(m_pMappedDataForDefragmentation)
11258  {
11259  *ppMappedData = m_pMappedDataForDefragmentation;
11260  return VK_SUCCESS;
11261  }
11262 
11263  // The block was already mapped outside of defragmentation.
11264  if(m_pBlock->GetMappedData())
11265  {
11266  *ppMappedData = m_pBlock->GetMappedData();
11267  return VK_SUCCESS;
11268  }
11269 
11270  // Map on first usage.
11271  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11272  *ppMappedData = m_pMappedDataForDefragmentation;
11273  return res;
11274 }
11275 
11276 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11277 {
11278  if(m_pMappedDataForDefragmentation != VMA_NULL)
11279  {
11280  m_pBlock->Unmap(hAllocator, 1);
11281  }
11282 }
11283 
11284 VkResult VmaDefragmentator::DefragmentRound(
11285  VkDeviceSize maxBytesToMove,
11286  uint32_t maxAllocationsToMove)
11287 {
11288  if(m_Blocks.empty())
11289  {
11290  return VK_SUCCESS;
11291  }
11292 
11293  size_t srcBlockIndex = m_Blocks.size() - 1;
11294  size_t srcAllocIndex = SIZE_MAX;
11295  for(;;)
11296  {
11297  // 1. Find next allocation to move.
11298  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11299  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11300  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11301  {
11302  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11303  {
11304  // Finished: no more allocations to process.
11305  if(srcBlockIndex == 0)
11306  {
11307  return VK_SUCCESS;
11308  }
11309  else
11310  {
11311  --srcBlockIndex;
11312  srcAllocIndex = SIZE_MAX;
11313  }
11314  }
11315  else
11316  {
11317  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11318  }
11319  }
11320 
11321  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11322  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11323 
11324  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11325  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11326  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11327  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11328 
11329  // 2. Try to find new place for this allocation in preceding or current block.
11330  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11331  {
11332  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11333  VmaAllocationRequest dstAllocRequest;
11334  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11335  m_CurrentFrameIndex,
11336  m_pBlockVector->GetFrameInUseCount(),
11337  m_pBlockVector->GetBufferImageGranularity(),
11338  size,
11339  alignment,
11340  false, // upperAddress
11341  suballocType,
11342  false, // canMakeOtherLost
11343  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT, // strategy
11344  &dstAllocRequest) &&
11345  MoveMakesSense(
11346  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11347  {
11348  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11349 
11350  // Reached limit on number of allocations or bytes to move.
11351  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11352  (m_BytesMoved + size > maxBytesToMove))
11353  {
11354  return VK_INCOMPLETE;
11355  }
11356 
11357  void* pDstMappedData = VMA_NULL;
11358  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11359  if(res != VK_SUCCESS)
11360  {
11361  return res;
11362  }
11363 
11364  void* pSrcMappedData = VMA_NULL;
11365  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11366  if(res != VK_SUCCESS)
11367  {
11368  return res;
11369  }
11370 
11371  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11372  memcpy(
11373  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11374  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11375  static_cast<size_t>(size));
11376 
11377  if(VMA_DEBUG_MARGIN > 0)
11378  {
11379  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11380  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11381  }
11382 
11383  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11384  dstAllocRequest,
11385  suballocType,
11386  size,
11387  false, // upperAddress
11388  allocInfo.m_hAllocation);
11389  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11390 
11391  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11392 
11393  if(allocInfo.m_pChanged != VMA_NULL)
11394  {
11395  *allocInfo.m_pChanged = VK_TRUE;
11396  }
11397 
11398  ++m_AllocationsMoved;
11399  m_BytesMoved += size;
11400 
11401  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11402 
11403  break;
11404  }
11405  }
11406 
11407  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
11408 
11409  if(srcAllocIndex > 0)
11410  {
11411  --srcAllocIndex;
11412  }
11413  else
11414  {
11415  if(srcBlockIndex > 0)
11416  {
11417  --srcBlockIndex;
11418  srcAllocIndex = SIZE_MAX;
11419  }
11420  else
11421  {
11422  return VK_SUCCESS;
11423  }
11424  }
11425  }
11426 }
11427 
11428 VkResult VmaDefragmentator::Defragment(
11429  VkDeviceSize maxBytesToMove,
11430  uint32_t maxAllocationsToMove)
11431 {
11432  if(m_Allocations.empty())
11433  {
11434  return VK_SUCCESS;
11435  }
11436 
11437  // Create block info for each block.
11438  const size_t blockCount = m_pBlockVector->m_Blocks.size();
11439  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11440  {
11441  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
11442  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
11443  m_Blocks.push_back(pBlockInfo);
11444  }
11445 
11446  // Sort them by m_pBlock pointer value.
11447  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
11448 
11449  // Move allocation infos from m_Allocations to the m_Allocations of the matching entry in m_Blocks.
11450  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
11451  {
11452  AllocationInfo& allocInfo = m_Allocations[allocIndex];
11453  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
11454  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11455  {
11456  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
11457  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
11458  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
11459  {
11460  (*it)->m_Allocations.push_back(allocInfo);
11461  }
11462  else
11463  {
11464  VMA_ASSERT(0);
11465  }
11466  }
11467  }
11468  m_Allocations.clear();
11469 
11470  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11471  {
11472  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
11473  pBlockInfo->CalcHasNonMovableAllocations();
11474  pBlockInfo->SortAllocationsBySizeDescecnding();
11475  }
11476 
11477  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
11478  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
11479 
11480  // Execute defragmentation rounds (the main part).
11481  VkResult result = VK_SUCCESS;
11482  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
11483  {
11484  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
11485  }
11486 
11487  // Unmap blocks that were mapped for defragmentation.
11488  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11489  {
11490  m_Blocks[blockIndex]->Unmap(m_hAllocator);
11491  }
11492 
11493  return result;
11494 }
11495 
11496 bool VmaDefragmentator::MoveMakesSense(
11497  size_t dstBlockIndex, VkDeviceSize dstOffset,
11498  size_t srcBlockIndex, VkDeviceSize srcOffset)
11499 {
11500  if(dstBlockIndex < srcBlockIndex)
11501  {
11502  return true;
11503  }
11504  if(dstBlockIndex > srcBlockIndex)
11505  {
11506  return false;
11507  }
11508  if(dstOffset < srcOffset)
11509  {
11510  return true;
11511  }
11512  return false;
11513 }
11514 
11515 ////////////////////////////////////////////////////////////////////////////////
11516 // VmaRecorder
11517 
11518 #if VMA_RECORDING_ENABLED
11519 
11520 VmaRecorder::VmaRecorder() :
11521  m_UseMutex(true),
11522  m_Flags(0),
11523  m_File(VMA_NULL),
11524  m_Freq(INT64_MAX),
11525  m_StartCounter(INT64_MAX)
11526 {
11527 }
11528 
11529 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11530 {
11531  m_UseMutex = useMutex;
11532  m_Flags = settings.flags;
11533 
11534  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11535  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11536 
11537  // Open file for writing.
11538  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11539  if(err != 0)
11540  {
11541  return VK_ERROR_INITIALIZATION_FAILED;
11542  }
11543 
11544  // Write header.
11545  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11546  fprintf(m_File, "%s\n", "1,4");
11547 
11548  return VK_SUCCESS;
11549 }
11550 
11551 VmaRecorder::~VmaRecorder()
11552 {
11553  if(m_File != VMA_NULL)
11554  {
11555  fclose(m_File);
11556  }
11557 }
11558 
11559 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11560 {
11561  CallParams callParams;
11562  GetBasicParams(callParams);
11563 
11564  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11565  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11566  Flush();
11567 }
11568 
11569 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11570 {
11571  CallParams callParams;
11572  GetBasicParams(callParams);
11573 
11574  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11575  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11576  Flush();
11577 }
11578 
11579 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
11580 {
11581  CallParams callParams;
11582  GetBasicParams(callParams);
11583 
11584  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11585  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
11586  createInfo.memoryTypeIndex,
11587  createInfo.flags,
11588  createInfo.blockSize,
11589  (uint64_t)createInfo.minBlockCount,
11590  (uint64_t)createInfo.maxBlockCount,
11591  createInfo.frameInUseCount,
11592  pool);
11593  Flush();
11594 }
11595 
11596 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11597 {
11598  CallParams callParams;
11599  GetBasicParams(callParams);
11600 
11601  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11602  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11603  pool);
11604  Flush();
11605 }
11606 
11607 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11608  const VkMemoryRequirements& vkMemReq,
11609  const VmaAllocationCreateInfo& createInfo,
11610  VmaAllocation allocation)
11611 {
11612  CallParams callParams;
11613  GetBasicParams(callParams);
11614 
11615  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11616  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11617  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11618  vkMemReq.size,
11619  vkMemReq.alignment,
11620  vkMemReq.memoryTypeBits,
11621  createInfo.flags,
11622  createInfo.usage,
11623  createInfo.requiredFlags,
11624  createInfo.preferredFlags,
11625  createInfo.memoryTypeBits,
11626  createInfo.pool,
11627  allocation,
11628  userDataStr.GetString());
11629  Flush();
11630 }
11631 
11632 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11633  const VkMemoryRequirements& vkMemReq,
11634  bool requiresDedicatedAllocation,
11635  bool prefersDedicatedAllocation,
11636  const VmaAllocationCreateInfo& createInfo,
11637  VmaAllocation allocation)
11638 {
11639  CallParams callParams;
11640  GetBasicParams(callParams);
11641 
11642  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11643  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11644  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11645  vkMemReq.size,
11646  vkMemReq.alignment,
11647  vkMemReq.memoryTypeBits,
11648  requiresDedicatedAllocation ? 1 : 0,
11649  prefersDedicatedAllocation ? 1 : 0,
11650  createInfo.flags,
11651  createInfo.usage,
11652  createInfo.requiredFlags,
11653  createInfo.preferredFlags,
11654  createInfo.memoryTypeBits,
11655  createInfo.pool,
11656  allocation,
11657  userDataStr.GetString());
11658  Flush();
11659 }
11660 
11661 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11662  const VkMemoryRequirements& vkMemReq,
11663  bool requiresDedicatedAllocation,
11664  bool prefersDedicatedAllocation,
11665  const VmaAllocationCreateInfo& createInfo,
11666  VmaAllocation allocation)
11667 {
11668  CallParams callParams;
11669  GetBasicParams(callParams);
11670 
11671  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11672  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11673  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11674  vkMemReq.size,
11675  vkMemReq.alignment,
11676  vkMemReq.memoryTypeBits,
11677  requiresDedicatedAllocation ? 1 : 0,
11678  prefersDedicatedAllocation ? 1 : 0,
11679  createInfo.flags,
11680  createInfo.usage,
11681  createInfo.requiredFlags,
11682  createInfo.preferredFlags,
11683  createInfo.memoryTypeBits,
11684  createInfo.pool,
11685  allocation,
11686  userDataStr.GetString());
11687  Flush();
11688 }
11689 
11690 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11691  VmaAllocation allocation)
11692 {
11693  CallParams callParams;
11694  GetBasicParams(callParams);
11695 
11696  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11697  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11698  allocation);
11699  Flush();
11700 }
11701 
11702 void VmaRecorder::RecordResizeAllocation(
11703  uint32_t frameIndex,
11704  VmaAllocation allocation,
11705  VkDeviceSize newSize)
11706 {
11707  CallParams callParams;
11708  GetBasicParams(callParams);
11709 
11710  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11711  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
11712  allocation, newSize);
11713  Flush();
11714 }
11715 
11716 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11717  VmaAllocation allocation,
11718  const void* pUserData)
11719 {
11720  CallParams callParams;
11721  GetBasicParams(callParams);
11722 
11723  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11724  UserDataString userDataStr(
11725  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11726  pUserData);
11727  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11728  allocation,
11729  userDataStr.GetString());
11730  Flush();
11731 }
11732 
11733 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11734  VmaAllocation allocation)
11735 {
11736  CallParams callParams;
11737  GetBasicParams(callParams);
11738 
11739  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11740  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11741  allocation);
11742  Flush();
11743 }
11744 
11745 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11746  VmaAllocation allocation)
11747 {
11748  CallParams callParams;
11749  GetBasicParams(callParams);
11750 
11751  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11752  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11753  allocation);
11754  Flush();
11755 }
11756 
11757 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11758  VmaAllocation allocation)
11759 {
11760  CallParams callParams;
11761  GetBasicParams(callParams);
11762 
11763  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11764  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11765  allocation);
11766  Flush();
11767 }
11768 
11769 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11770  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11771 {
11772  CallParams callParams;
11773  GetBasicParams(callParams);
11774 
11775  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11776  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11777  allocation,
11778  offset,
11779  size);
11780  Flush();
11781 }
11782 
11783 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11784  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11785 {
11786  CallParams callParams;
11787  GetBasicParams(callParams);
11788 
11789  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11790  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11791  allocation,
11792  offset,
11793  size);
11794  Flush();
11795 }
11796 
11797 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11798  const VkBufferCreateInfo& bufCreateInfo,
11799  const VmaAllocationCreateInfo& allocCreateInfo,
11800  VmaAllocation allocation)
11801 {
11802  CallParams callParams;
11803  GetBasicParams(callParams);
11804 
11805  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11806  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11807  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11808  bufCreateInfo.flags,
11809  bufCreateInfo.size,
11810  bufCreateInfo.usage,
11811  bufCreateInfo.sharingMode,
11812  allocCreateInfo.flags,
11813  allocCreateInfo.usage,
11814  allocCreateInfo.requiredFlags,
11815  allocCreateInfo.preferredFlags,
11816  allocCreateInfo.memoryTypeBits,
11817  allocCreateInfo.pool,
11818  allocation,
11819  userDataStr.GetString());
11820  Flush();
11821 }
11822 
11823 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11824  const VkImageCreateInfo& imageCreateInfo,
11825  const VmaAllocationCreateInfo& allocCreateInfo,
11826  VmaAllocation allocation)
11827 {
11828  CallParams callParams;
11829  GetBasicParams(callParams);
11830 
11831  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11832  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11833  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11834  imageCreateInfo.flags,
11835  imageCreateInfo.imageType,
11836  imageCreateInfo.format,
11837  imageCreateInfo.extent.width,
11838  imageCreateInfo.extent.height,
11839  imageCreateInfo.extent.depth,
11840  imageCreateInfo.mipLevels,
11841  imageCreateInfo.arrayLayers,
11842  imageCreateInfo.samples,
11843  imageCreateInfo.tiling,
11844  imageCreateInfo.usage,
11845  imageCreateInfo.sharingMode,
11846  imageCreateInfo.initialLayout,
11847  allocCreateInfo.flags,
11848  allocCreateInfo.usage,
11849  allocCreateInfo.requiredFlags,
11850  allocCreateInfo.preferredFlags,
11851  allocCreateInfo.memoryTypeBits,
11852  allocCreateInfo.pool,
11853  allocation,
11854  userDataStr.GetString());
11855  Flush();
11856 }
11857 
11858 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11859  VmaAllocation allocation)
11860 {
11861  CallParams callParams;
11862  GetBasicParams(callParams);
11863 
11864  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11865  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11866  allocation);
11867  Flush();
11868 }
11869 
11870 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11871  VmaAllocation allocation)
11872 {
11873  CallParams callParams;
11874  GetBasicParams(callParams);
11875 
11876  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11877  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11878  allocation);
11879  Flush();
11880 }
11881 
11882 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11883  VmaAllocation allocation)
11884 {
11885  CallParams callParams;
11886  GetBasicParams(callParams);
11887 
11888  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11889  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11890  allocation);
11891  Flush();
11892 }
11893 
11894 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11895  VmaAllocation allocation)
11896 {
11897  CallParams callParams;
11898  GetBasicParams(callParams);
11899 
11900  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11901  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11902  allocation);
11903  Flush();
11904 }
11905 
11906 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11907  VmaPool pool)
11908 {
11909  CallParams callParams;
11910  GetBasicParams(callParams);
11911 
11912  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11913  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11914  pool);
11915  Flush();
11916 }
11917 
11918 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11919 {
11920  if(pUserData != VMA_NULL)
11921  {
11922  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11923  {
11924  m_Str = (const char*)pUserData;
11925  }
11926  else
11927  {
11928  sprintf_s(m_PtrStr, "%p", pUserData);
11929  m_Str = m_PtrStr;
11930  }
11931  }
11932  else
11933  {
11934  m_Str = "";
11935  }
11936 }
11937 
11938 void VmaRecorder::WriteConfiguration(
11939  const VkPhysicalDeviceProperties& devProps,
11940  const VkPhysicalDeviceMemoryProperties& memProps,
11941  bool dedicatedAllocationExtensionEnabled)
11942 {
11943  fprintf(m_File, "Config,Begin\n");
11944 
11945  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
11946  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
11947  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
11948  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
11949  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
11950  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
11951 
11952  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
11953  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
11954  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
11955 
11956  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
11957  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
11958  {
11959  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
11960  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
11961  }
11962  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
11963  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
11964  {
11965  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
11966  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
11967  }
11968 
11969  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
11970 
11971  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
11972  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
11973  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
11974  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
11975  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
11976  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
11977  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
11978  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
11979  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11980 
11981  fprintf(m_File, "Config,End\n");
11982 }
11983 
11984 void VmaRecorder::GetBasicParams(CallParams& outParams)
11985 {
11986  outParams.threadId = GetCurrentThreadId();
11987 
11988  LARGE_INTEGER counter;
11989  QueryPerformanceCounter(&counter);
11990  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11991 }
11992 
11993 void VmaRecorder::Flush()
11994 {
11995  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11996  {
11997  fflush(m_File);
11998  }
11999 }
12000 
12001 #endif // #if VMA_RECORDING_ENABLED
12002 
12003 ////////////////////////////////////////////////////////////////////////////////
12004 // VmaAllocator_T
12005 
12006 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
12007  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
12008  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
12009  m_hDevice(pCreateInfo->device),
12010  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
12011  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
12012  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
12013  m_PreferredLargeHeapBlockSize(0),
12014  m_PhysicalDevice(pCreateInfo->physicalDevice),
12015  m_CurrentFrameIndex(0),
12016  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
12017  m_NextPoolId(0)
12018 #if VMA_RECORDING_ENABLED
12019  ,m_pRecorder(VMA_NULL)
12020 #endif
12021 {
12022  if(VMA_DEBUG_DETECT_CORRUPTION)
12023  {
12024  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
12025  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
12026  }
12027 
12028  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
12029 
12030 #if !(VMA_DEDICATED_ALLOCATION)
12031  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
12032  {
12033  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
12034  }
12035 #endif
12036 
12037  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
12038  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
12039  memset(&m_MemProps, 0, sizeof(m_MemProps));
12040 
12041  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
12042  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
12043 
12044  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12045  {
12046  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
12047  }
12048 
12049  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
12050  {
12051  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
12052  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
12053  }
12054 
12055  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
12056 
12057  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
12058  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
12059 
12060  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
12061  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
12062  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
12063  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
12064 
12065  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
12066  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
12067 
12068  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
12069  {
12070  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
12071  {
12072  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
12073  if(limit != VK_WHOLE_SIZE)
12074  {
12075  m_HeapSizeLimit[heapIndex] = limit;
12076  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
12077  {
12078  m_MemProps.memoryHeaps[heapIndex].size = limit;
12079  }
12080  }
12081  }
12082  }
12083 
12084  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12085  {
12086  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
12087 
12088  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
12089  this,
12090  memTypeIndex,
12091  preferredBlockSize,
12092  0,
12093  SIZE_MAX,
12094  GetBufferImageGranularity(),
12095  pCreateInfo->frameInUseCount,
12096  false, // isCustomPool
12097  false, // explicitBlockSize
12098  false); // linearAlgorithm
12099  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
12100  // because minBlockCount is 0.
12101  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
12102 
12103  }
12104 }
12105 
12106 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
12107 {
12108  VkResult res = VK_SUCCESS;
12109 
12110  if(pCreateInfo->pRecordSettings != VMA_NULL &&
12111  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
12112  {
12113 #if VMA_RECORDING_ENABLED
12114  m_pRecorder = vma_new(this, VmaRecorder)();
12115  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
12116  if(res != VK_SUCCESS)
12117  {
12118  return res;
12119  }
12120  m_pRecorder->WriteConfiguration(
12121  m_PhysicalDeviceProperties,
12122  m_MemProps,
12123  m_UseKhrDedicatedAllocation);
12124  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
12125 #else
12126  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
12127  return VK_ERROR_FEATURE_NOT_PRESENT;
12128 #endif
12129  }
12130 
12131  return res;
12132 }
12133 
12134 VmaAllocator_T::~VmaAllocator_T()
12135 {
12136 #if VMA_RECORDING_ENABLED
12137  if(m_pRecorder != VMA_NULL)
12138  {
12139  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
12140  vma_delete(this, m_pRecorder);
12141  }
12142 #endif
12143 
12144  VMA_ASSERT(m_Pools.empty());
12145 
12146  for(size_t i = GetMemoryTypeCount(); i--; )
12147  {
12148  vma_delete(this, m_pDedicatedAllocations[i]);
12149  vma_delete(this, m_pBlockVectors[i]);
12150  }
12151 }
12152 
12153 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
12154 {
12155 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12156  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
12157  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
12158  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
12159  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
12160  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
12161  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
12162  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
12163  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
12164  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
12165  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
12166  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
12167  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
12168  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
12169  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
12170  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
12171  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
12172 #if VMA_DEDICATED_ALLOCATION
12173  if(m_UseKhrDedicatedAllocation)
12174  {
12175  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
12176  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
12177  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
12178  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
12179  }
12180 #endif // #if VMA_DEDICATED_ALLOCATION
12181 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12182 
12183 #define VMA_COPY_IF_NOT_NULL(funcName) \
12184  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
12185 
12186  if(pVulkanFunctions != VMA_NULL)
12187  {
12188  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
12189  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
12190  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
12191  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
12192  VMA_COPY_IF_NOT_NULL(vkMapMemory);
12193  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
12194  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
12195  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
12196  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
12197  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
12198  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
12199  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
12200  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
12201  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
12202  VMA_COPY_IF_NOT_NULL(vkCreateImage);
12203  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
12204 #if VMA_DEDICATED_ALLOCATION
12205  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
12206  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
12207 #endif
12208  }
12209 
12210 #undef VMA_COPY_IF_NOT_NULL
12211 
12212  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
12213  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
12214  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
12215  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
12216  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
12217  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
12218  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
12219  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
12220  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
12221  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
12222  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
12223  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
12224  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
12225  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
12226  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
12227  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
12228  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
12229  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
12230 #if VMA_DEDICATED_ALLOCATION
12231  if(m_UseKhrDedicatedAllocation)
12232  {
12233  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
12234  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
12235  }
12236 #endif
12237 }
12238 
12239 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12240 {
12241  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12242  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12243  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12244  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12245 }
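/*
Editor's note: a worked example, assuming the default macro values defined
earlier in this header (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB,
VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB): a 512 MiB heap counts as small
and gets 512 / 8 = 64 MiB blocks, while an 8 GiB heap gets 256 MiB blocks
(or VmaAllocatorCreateInfo::preferredLargeHeapBlockSize, if the user set it
to a nonzero value).
*/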
12246 
12247 VkResult VmaAllocator_T::AllocateMemoryOfType(
12248  VkDeviceSize size,
12249  VkDeviceSize alignment,
12250  bool dedicatedAllocation,
12251  VkBuffer dedicatedBuffer,
12252  VkImage dedicatedImage,
12253  const VmaAllocationCreateInfo& createInfo,
12254  uint32_t memTypeIndex,
12255  VmaSuballocationType suballocType,
12256  VmaAllocation* pAllocation)
12257 {
12258  VMA_ASSERT(pAllocation != VMA_NULL);
12259  VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
12260 
12261  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12262 
12263  // If memory type is not HOST_VISIBLE, disable MAPPED.
12264  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12265  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12266  {
12267  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12268  }
12269 
12270  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12271  VMA_ASSERT(blockVector);
12272 
12273  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12274  bool preferDedicatedMemory =
12275  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12276  dedicatedAllocation ||
12277  // Heuristic: Allocate dedicated memory if requested size is greater than half of preferred block size.
12278  size > preferredBlockSize / 2;
12279 
12280  if(preferDedicatedMemory &&
12281  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12282  finalCreateInfo.pool == VK_NULL_HANDLE)
12283  {
12284  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
12285  }
12286 
12287  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12288  {
12289  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12290  {
12291  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12292  }
12293  else
12294  {
12295  return AllocateDedicatedMemory(
12296  size,
12297  suballocType,
12298  memTypeIndex,
12299  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12300  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12301  finalCreateInfo.pUserData,
12302  dedicatedBuffer,
12303  dedicatedImage,
12304  pAllocation);
12305  }
12306  }
12307  else
12308  {
12309  VkResult res = blockVector->Allocate(
12310  VK_NULL_HANDLE, // hCurrentPool
12311  m_CurrentFrameIndex.load(),
12312  size,
12313  alignment,
12314  finalCreateInfo,
12315  suballocType,
12316  pAllocation);
12317  if(res == VK_SUCCESS)
12318  {
12319  return res;
12320  }
12321 
12322  // Try dedicated memory as a fallback.
12323  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12324  {
12325  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12326  }
12327  else
12328  {
12329  res = AllocateDedicatedMemory(
12330  size,
12331  suballocType,
12332  memTypeIndex,
12333  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12334  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12335  finalCreateInfo.pUserData,
12336  dedicatedBuffer,
12337  dedicatedImage,
12338  pAllocation);
12339  if(res == VK_SUCCESS)
12340  {
12341  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
12342  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12343  return VK_SUCCESS;
12344  }
12345  else
12346  {
12347  // Everything failed: Return error code.
12348  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12349  return res;
12350  }
12351  }
12352  }
12353 }
12354 
12355 VkResult VmaAllocator_T::AllocateDedicatedMemory(
12356  VkDeviceSize size,
12357  VmaSuballocationType suballocType,
12358  uint32_t memTypeIndex,
12359  bool map,
12360  bool isUserDataString,
12361  void* pUserData,
12362  VkBuffer dedicatedBuffer,
12363  VkImage dedicatedImage,
12364  VmaAllocation* pAllocation)
12365 {
12366  VMA_ASSERT(pAllocation);
12367 
12368  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12369  allocInfo.memoryTypeIndex = memTypeIndex;
12370  allocInfo.allocationSize = size;
12371 
12372 #if VMA_DEDICATED_ALLOCATION
12373  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
12374  if(m_UseKhrDedicatedAllocation)
12375  {
12376  if(dedicatedBuffer != VK_NULL_HANDLE)
12377  {
12378  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
12379  dedicatedAllocInfo.buffer = dedicatedBuffer;
12380  allocInfo.pNext = &dedicatedAllocInfo;
12381  }
12382  else if(dedicatedImage != VK_NULL_HANDLE)
12383  {
12384  dedicatedAllocInfo.image = dedicatedImage;
12385  allocInfo.pNext = &dedicatedAllocInfo;
12386  }
12387  }
12388 #endif // #if VMA_DEDICATED_ALLOCATION
12389 
12390  // Allocate VkDeviceMemory.
12391  VkDeviceMemory hMemory = VK_NULL_HANDLE;
12392  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
12393  if(res < 0)
12394  {
12395  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12396  return res;
12397  }
12398 
12399  void* pMappedData = VMA_NULL;
12400  if(map)
12401  {
12402  res = (*m_VulkanFunctions.vkMapMemory)(
12403  m_hDevice,
12404  hMemory,
12405  0,
12406  VK_WHOLE_SIZE,
12407  0,
12408  &pMappedData);
12409  if(res < 0)
12410  {
12411  VMA_DEBUG_LOG(" vkMapMemory FAILED");
12412  FreeVulkanMemory(memTypeIndex, size, hMemory);
12413  return res;
12414  }
12415  }
12416 
12417  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
12418  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
12419  (*pAllocation)->SetUserData(this, pUserData);
12420  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12421  {
12422  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12423  }
12424 
12425  // Register it in m_pDedicatedAllocations.
12426  {
12427  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12428  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12429  VMA_ASSERT(pDedicatedAllocations);
12430  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
12431  }
12432 
12433  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
12434 
12435  return VK_SUCCESS;
12436 }
12437 
12438 void VmaAllocator_T::GetBufferMemoryRequirements(
12439  VkBuffer hBuffer,
12440  VkMemoryRequirements& memReq,
12441  bool& requiresDedicatedAllocation,
12442  bool& prefersDedicatedAllocation) const
12443 {
12444 #if VMA_DEDICATED_ALLOCATION
12445  if(m_UseKhrDedicatedAllocation)
12446  {
12447  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
12448  memReqInfo.buffer = hBuffer;
12449 
12450  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12451 
12452  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12453  memReq2.pNext = &memDedicatedReq;
12454 
12455  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12456 
12457  memReq = memReq2.memoryRequirements;
12458  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12459  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12460  }
12461  else
12462 #endif // #if VMA_DEDICATED_ALLOCATION
12463  {
12464  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
12465  requiresDedicatedAllocation = false;
12466  prefersDedicatedAllocation = false;
12467  }
12468 }
12469 
12470 void VmaAllocator_T::GetImageMemoryRequirements(
12471  VkImage hImage,
12472  VkMemoryRequirements& memReq,
12473  bool& requiresDedicatedAllocation,
12474  bool& prefersDedicatedAllocation) const
12475 {
12476 #if VMA_DEDICATED_ALLOCATION
12477  if(m_UseKhrDedicatedAllocation)
12478  {
12479  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
12480  memReqInfo.image = hImage;
12481 
12482  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12483 
12484  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12485  memReq2.pNext = &memDedicatedReq;
12486 
12487  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12488 
12489  memReq = memReq2.memoryRequirements;
12490  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12491  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12492  }
12493  else
12494 #endif // #if VMA_DEDICATED_ALLOCATION
12495  {
12496  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
12497  requiresDedicatedAllocation = false;
12498  prefersDedicatedAllocation = false;
12499  }
12500 }
12501 
12502 VkResult VmaAllocator_T::AllocateMemory(
12503  const VkMemoryRequirements& vkMemReq,
12504  bool requiresDedicatedAllocation,
12505  bool prefersDedicatedAllocation,
12506  VkBuffer dedicatedBuffer,
12507  VkImage dedicatedImage,
12508  const VmaAllocationCreateInfo& createInfo,
12509  VmaSuballocationType suballocType,
12510  VmaAllocation* pAllocation)
12511 {
12512  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12513 
12514  if(vkMemReq.size == 0)
12515  {
12516  return VK_ERROR_VALIDATION_FAILED_EXT;
12517  }
12518  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12519  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12520  {
12521  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12522  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12523  }
12524  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12525  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
12526  {
12527  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12528  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12529  }
12530  if(requiresDedicatedAllocation)
12531  {
12532  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12533  {
12534  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12535  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12536  }
12537  if(createInfo.pool != VK_NULL_HANDLE)
12538  {
12539  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12540  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12541  }
12542  }
12543  if((createInfo.pool != VK_NULL_HANDLE) &&
12544  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12545  {
12546  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12547  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12548  }
12549 
12550  if(createInfo.pool != VK_NULL_HANDLE)
12551  {
12552  const VkDeviceSize alignmentForPool = VMA_MAX(
12553  vkMemReq.alignment,
12554  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12555  return createInfo.pool->m_BlockVector.Allocate(
12556  createInfo.pool,
12557  m_CurrentFrameIndex.load(),
12558  vkMemReq.size,
12559  alignmentForPool,
12560  createInfo,
12561  suballocType,
12562  pAllocation);
12563  }
12564  else
12565  {
12566  // Bit mask of Vulkan memory types acceptable for this allocation.
12567  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12568  uint32_t memTypeIndex = UINT32_MAX;
12569  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12570  if(res == VK_SUCCESS)
12571  {
12572  VkDeviceSize alignmentForMemType = VMA_MAX(
12573  vkMemReq.alignment,
12574  GetMemoryTypeMinAlignment(memTypeIndex));
12575 
12576  res = AllocateMemoryOfType(
12577  vkMemReq.size,
12578  alignmentForMemType,
12579  requiresDedicatedAllocation || prefersDedicatedAllocation,
12580  dedicatedBuffer,
12581  dedicatedImage,
12582  createInfo,
12583  memTypeIndex,
12584  suballocType,
12585  pAllocation);
12586  // Succeeded on first try.
12587  if(res == VK_SUCCESS)
12588  {
12589  return res;
12590  }
12591  // Allocation from this memory type failed. Try other compatible memory types.
12592  else
12593  {
12594  for(;;)
12595  {
12596  // Remove old memTypeIndex from list of possibilities.
12597  memoryTypeBits &= ~(1u << memTypeIndex);
12598  // Find alternative memTypeIndex.
12599  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12600  if(res == VK_SUCCESS)
12601  {
12602  alignmentForMemType = VMA_MAX(
12603  vkMemReq.alignment,
12604  GetMemoryTypeMinAlignment(memTypeIndex));
12605 
12606  res = AllocateMemoryOfType(
12607  vkMemReq.size,
12608  alignmentForMemType,
12609  requiresDedicatedAllocation || prefersDedicatedAllocation,
12610  dedicatedBuffer,
12611  dedicatedImage,
12612  createInfo,
12613  memTypeIndex,
12614  suballocType,
12615  pAllocation);
12616  // Allocation from this alternative memory type succeeded.
12617  if(res == VK_SUCCESS)
12618  {
12619  return res;
12620  }
12621  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12622  }
12623  // No other matching memory type index could be found.
12624  else
12625  {
12626  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12627  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12628  }
12629  }
12630  }
12631  }
12632  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12633  else
12634  return res;
12635  }
12636 }
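/*
Editor's note: a minimal usage sketch of the public entry point that funnels
into this function (error handling omitted; `device`, `buf` and `allocator`
are assumed to already exist):

\code
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buf, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, nullptr);
\endcode
*/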
12637 
12638 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
12639 {
12640  VMA_ASSERT(allocation);
12641 
12642  if(TouchAllocation(allocation))
12643  {
12644  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12645  {
12646  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12647  }
12648 
12649  switch(allocation->GetType())
12650  {
12651  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12652  {
12653  VmaBlockVector* pBlockVector = VMA_NULL;
12654  VmaPool hPool = allocation->GetPool();
12655  if(hPool != VK_NULL_HANDLE)
12656  {
12657  pBlockVector = &hPool->m_BlockVector;
12658  }
12659  else
12660  {
12661  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12662  pBlockVector = m_pBlockVectors[memTypeIndex];
12663  }
12664  pBlockVector->Free(allocation);
12665  }
12666  break;
12667  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12668  FreeDedicatedMemory(allocation);
12669  break;
12670  default:
12671  VMA_ASSERT(0);
12672  }
12673  }
12674 
12675  allocation->SetUserData(this, VMA_NULL);
12676  vma_delete(this, allocation);
12677 }
12678 
12679 VkResult VmaAllocator_T::ResizeAllocation(
12680  const VmaAllocation alloc,
12681  VkDeviceSize newSize)
12682 {
12683  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
12684  {
12685  return VK_ERROR_VALIDATION_FAILED_EXT;
12686  }
12687  if(newSize == alloc->GetSize())
12688  {
12689  return VK_SUCCESS;
12690  }
12691 
12692  switch(alloc->GetType())
12693  {
12694  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12695  return VK_ERROR_FEATURE_NOT_PRESENT;
12696  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12697  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
12698  {
12699  alloc->ChangeSize(newSize);
12700  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
12701  return VK_SUCCESS;
12702  }
12703  else
12704  {
12705  return VK_ERROR_OUT_OF_POOL_MEMORY;
12706  }
12707  default:
12708  VMA_ASSERT(0);
12709  return VK_ERROR_VALIDATION_FAILED_EXT;
12710  }
12711 }
12712 
12713 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12714 {
12715  // Initialize.
12716  InitStatInfo(pStats->total);
12717  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12718  InitStatInfo(pStats->memoryType[i]);
12719  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12720  InitStatInfo(pStats->memoryHeap[i]);
12721 
12722  // Process default pools.
12723  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12724  {
12725  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12726  VMA_ASSERT(pBlockVector);
12727  pBlockVector->AddStats(pStats);
12728  }
12729 
12730  // Process custom pools.
12731  {
12732  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12733  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12734  {
12735  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12736  }
12737  }
12738 
12739  // Process dedicated allocations.
12740  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12741  {
12742  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12743  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12744  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12745  VMA_ASSERT(pDedicatedAllocVector);
12746  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12747  {
12748  VmaStatInfo allocationStatInfo;
12749  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12750  VmaAddStatInfo(pStats->total, allocationStatInfo);
12751  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12752  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12753  }
12754  }
12755 
12756  // Postprocess.
12757  VmaPostprocessCalcStatInfo(pStats->total);
12758  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12759  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12760  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12761  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12762 }
12763 
12764 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12765 
12766 VkResult VmaAllocator_T::Defragment(
12767  VmaAllocation* pAllocations,
12768  size_t allocationCount,
12769  VkBool32* pAllocationsChanged,
12770  const VmaDefragmentationInfo* pDefragmentationInfo,
12771  VmaDefragmentationStats* pDefragmentationStats)
12772 {
12773  if(pAllocationsChanged != VMA_NULL)
12774  {
12775  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
12776  }
12777  if(pDefragmentationStats != VMA_NULL)
12778  {
12779  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12780  }
12781 
12782  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12783 
12784  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12785 
12786  const size_t poolCount = m_Pools.size();
12787 
12788  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12789  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12790  {
12791  VmaAllocation hAlloc = pAllocations[allocIndex];
12792  VMA_ASSERT(hAlloc);
12793  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12794  // DedicatedAlloc cannot be defragmented.
12795  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12796  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12797  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12798  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12799  // Lost allocation cannot be defragmented.
12800  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12801  {
12802  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12803 
12804  const VmaPool hAllocPool = hAlloc->GetPool();
12805  // This allocation belongs to custom pool.
12806  if(hAllocPool != VK_NULL_HANDLE)
12807  {
12808  // Pools with linear or buddy algorithm are not defragmented.
12809  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12810  {
12811  pAllocBlockVector = &hAllocPool->m_BlockVector;
12812  }
12813  }
12814  // This allocation belongs to general pool.
12815  else
12816  {
12817  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12818  }
12819 
12820  if(pAllocBlockVector != VMA_NULL)
12821  {
12822  VmaDefragmentator* const pDefragmentator =
12823  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12824  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12825  &pAllocationsChanged[allocIndex] : VMA_NULL;
12826  pDefragmentator->AddAllocation(hAlloc, pChanged);
12827  }
12828  }
12829  }
12830 
12831  VkResult result = VK_SUCCESS;
12832 
12833  // ======== Main processing.
12834 
12835  VkDeviceSize maxBytesToMove = SIZE_MAX;
12836  uint32_t maxAllocationsToMove = UINT32_MAX;
12837  if(pDefragmentationInfo != VMA_NULL)
12838  {
12839  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12840  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12841  }
12842 
12843  // Process standard memory.
12844  for(uint32_t memTypeIndex = 0;
12845  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12846  ++memTypeIndex)
12847  {
12848  // Only HOST_VISIBLE memory types can be defragmented.
12849  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12850  {
12851  result = m_pBlockVectors[memTypeIndex]->Defragment(
12852  pDefragmentationStats,
12853  maxBytesToMove,
12854  maxAllocationsToMove);
12855  }
12856  }
12857 
12858  // Process custom pools.
12859  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12860  {
12861  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12862  pDefragmentationStats,
12863  maxBytesToMove,
12864  maxAllocationsToMove);
12865  }
12866 
12867  // ======== Destroy defragmentators.
12868 
12869  // Process custom pools.
12870  for(size_t poolIndex = poolCount; poolIndex--; )
12871  {
12872  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12873  }
12874 
12875  // Process standard memory.
12876  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12877  {
12878  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12879  {
12880  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12881  }
12882  }
12883 
12884  return result;
12885 }
12886 
12887 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
12888 {
12889  if(hAllocation->CanBecomeLost())
12890  {
12891  /*
12892  Warning: This is a carefully designed algorithm.
12893  Do not modify unless you really know what you're doing :)
12894  */
12895  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12896  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12897  for(;;)
12898  {
12899  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12900  {
12901  pAllocationInfo->memoryType = UINT32_MAX;
12902  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
12903  pAllocationInfo->offset = 0;
12904  pAllocationInfo->size = hAllocation->GetSize();
12905  pAllocationInfo->pMappedData = VMA_NULL;
12906  pAllocationInfo->pUserData = hAllocation->GetUserData();
12907  return;
12908  }
12909  else if(localLastUseFrameIndex == localCurrFrameIndex)
12910  {
12911  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12912  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12913  pAllocationInfo->offset = hAllocation->GetOffset();
12914  pAllocationInfo->size = hAllocation->GetSize();
12915  pAllocationInfo->pMappedData = VMA_NULL;
12916  pAllocationInfo->pUserData = hAllocation->GetUserData();
12917  return;
12918  }
12919  else // Last use time earlier than current time.
12920  {
12921  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12922  {
12923  localLastUseFrameIndex = localCurrFrameIndex;
12924  }
12925  }
12926  }
12927  }
12928  else
12929  {
12930 #if VMA_STATS_STRING_ENABLED
12931  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12932  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12933  for(;;)
12934  {
12935  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12936  if(localLastUseFrameIndex == localCurrFrameIndex)
12937  {
12938  break;
12939  }
12940  else // Last use time earlier than current time.
12941  {
12942  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12943  {
12944  localLastUseFrameIndex = localCurrFrameIndex;
12945  }
12946  }
12947  }
12948 #endif
12949 
12950  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12951  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12952  pAllocationInfo->offset = hAllocation->GetOffset();
12953  pAllocationInfo->size = hAllocation->GetSize();
12954  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
12955  pAllocationInfo->pUserData = hAllocation->GetUserData();
12956  }
12957 }
12958 
12959 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
12960 {
12961  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
12962  if(hAllocation->CanBecomeLost())
12963  {
12964  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12965  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12966  for(;;)
12967  {
12968  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12969  {
12970  return false;
12971  }
12972  else if(localLastUseFrameIndex == localCurrFrameIndex)
12973  {
12974  return true;
12975  }
12976  else // Last use time earlier than current time.
12977  {
12978  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12979  {
12980  localLastUseFrameIndex = localCurrFrameIndex;
12981  }
12982  }
12983  }
12984  }
12985  else
12986  {
12987 #if VMA_STATS_STRING_ENABLED
12988  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12989  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12990  for(;;)
12991  {
12992  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12993  if(localLastUseFrameIndex == localCurrFrameIndex)
12994  {
12995  break;
12996  }
12997  else // Last use time earlier than current time.
12998  {
12999  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
13000  {
13001  localLastUseFrameIndex = localCurrFrameIndex;
13002  }
13003  }
13004  }
13005 #endif
13006 
13007  return true;
13008  }
13009 }
13010 
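// Example: a minimal sketch (not part of the library source) of how the frame-index
// machinery above is meant to be driven, assuming the allocation was created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT. Set the frame index once per frame,
// then "touch" each such allocation before use. `MyRecreateResource` is a
// hypothetical helper.
static void ExampleTouchPerFrame(VmaAllocator allocator, uint32_t frameIndex, VmaAllocation alloc)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex); // Stored in m_CurrentFrameIndex.
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // The allocation became lost - its memory may already be reused.
        // Recreate the resource here, e.g. MyRecreateResource(allocator, &alloc);
    }
}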
13011 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
13012 {
13013  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
13014 
13015  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
13016 
13017  if(newCreateInfo.maxBlockCount == 0)
13018  {
13019  newCreateInfo.maxBlockCount = SIZE_MAX;
13020  }
13021  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
13022  {
13023  return VK_ERROR_INITIALIZATION_FAILED;
13024  }
13025 
13026  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
13027 
13028  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
13029 
13030  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
13031  if(res != VK_SUCCESS)
13032  {
13033  vma_delete(this, *pPool);
13034  *pPool = VMA_NULL;
13035  return res;
13036  }
13037 
13038  // Add to m_Pools.
13039  {
13040  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13041  (*pPool)->SetId(m_NextPoolId++);
13042  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
13043  }
13044 
13045  return VK_SUCCESS;
13046 }
13047 
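// Example: the public-facing counterpart of CreatePool above, as a minimal sketch.
// Assumes `memoryTypeIndex` was obtained beforehand, e.g. via vmaFindMemoryTypeIndex.
static VkResult ExampleCreatePool(VmaAllocator allocator, uint32_t memoryTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
    poolCreateInfo.blockSize = 0;     // 0 = use CalcPreferredBlockSize() above.
    poolCreateInfo.minBlockCount = 1; // CreateMinBlocks() preallocates this many.
    poolCreateInfo.maxBlockCount = 0; // 0 = unlimited (becomes SIZE_MAX above).
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}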
13048 void VmaAllocator_T::DestroyPool(VmaPool pool)
13049 {
13050  // Remove from m_Pools.
13051  {
13052  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13053  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
13054  VMA_ASSERT(success && "Pool not found in Allocator.");
13055  }
13056 
13057  vma_delete(this, pool);
13058 }
13059 
13060 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
13061 {
13062  pool->m_BlockVector.GetPoolStats(pPoolStats);
13063 }
13064 
13065 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
13066 {
13067  m_CurrentFrameIndex.store(frameIndex);
13068 }
13069 
13070 void VmaAllocator_T::MakePoolAllocationsLost(
13071  VmaPool hPool,
13072  size_t* pLostAllocationCount)
13073 {
13074  hPool->m_BlockVector.MakePoolAllocationsLost(
13075  m_CurrentFrameIndex.load(),
13076  pLostAllocationCount);
13077 }
13078 
13079 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
13080 {
13081  return hPool->m_BlockVector.CheckCorruption();
13082 }
13083 
13084 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
13085 {
13086  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
13087 
13088  // Process default pools.
13089  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13090  {
13091  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
13092  {
13093  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
13094  VMA_ASSERT(pBlockVector);
13095  VkResult localRes = pBlockVector->CheckCorruption();
13096  switch(localRes)
13097  {
13098  case VK_ERROR_FEATURE_NOT_PRESENT:
13099  break;
13100  case VK_SUCCESS:
13101  finalRes = VK_SUCCESS;
13102  break;
13103  default:
13104  return localRes;
13105  }
13106  }
13107  }
13108 
13109  // Process custom pools.
13110  {
13111  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13112  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
13113  {
13114  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
13115  {
13116  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
13117  switch(localRes)
13118  {
13119  case VK_ERROR_FEATURE_NOT_PRESENT:
13120  break;
13121  case VK_SUCCESS:
13122  finalRes = VK_SUCCESS;
13123  break;
13124  default:
13125  return localRes;
13126  }
13127  }
13128  }
13129  }
13130 
13131  return finalRes;
13132 }
13133 
13134 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
13135 {
13136  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
13137  (*pAllocation)->InitLost();
13138 }
13139 
13140 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
13141 {
13142  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
13143 
13144  VkResult res;
13145  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13146  {
13147  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13148  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
13149  {
13150  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13151  if(res == VK_SUCCESS)
13152  {
13153  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
13154  }
13155  }
13156  else
13157  {
13158  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
13159  }
13160  }
13161  else
13162  {
13163  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13164  }
13165 
13166  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
13167  {
13168  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
13169  }
13170 
13171  return res;
13172 }
13173 
13174 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
13175 {
13176  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
13177  {
13178  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
13179  }
13180 
13181  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
13182 
13183  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
13184  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13185  {
13186  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13187  m_HeapSizeLimit[heapIndex] += size;
13188  }
13189 }
13190 
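// Example: where m_HeapSizeLimit above comes from - a sketch of allocator creation
// with a per-heap budget. The 512 MiB cap and the choice of heap 0 are assumptions
// for illustration only; entries left at VK_WHOLE_SIZE mean "no limit".
static VkResult ExampleCreateAllocatorWithHeapLimit(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapSizeLimit[i] = VK_WHOLE_SIZE;
    heapSizeLimit[0] = 512ull * 1024 * 1024; // Assumption: heap 0 is the one to cap.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapSizeLimit; // Copied into m_HeapSizeLimit at init.
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}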
13191 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
13192 {
13193  if(hAllocation->CanBecomeLost())
13194  {
13195  return VK_ERROR_MEMORY_MAP_FAILED;
13196  }
13197 
13198  switch(hAllocation->GetType())
13199  {
13200  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13201  {
13202  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13203  char *pBytes = VMA_NULL;
13204  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
13205  if(res == VK_SUCCESS)
13206  {
13207  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
13208  hAllocation->BlockAllocMap();
13209  }
13210  return res;
13211  }
13212  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13213  return hAllocation->DedicatedAllocMap(this, ppData);
13214  default:
13215  VMA_ASSERT(0);
13216  return VK_ERROR_MEMORY_MAP_FAILED;
13217  }
13218 }
13219 
13220 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
13221 {
13222  switch(hAllocation->GetType())
13223  {
13224  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13225  {
13226  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13227  hAllocation->BlockAllocUnmap();
13228  pBlock->Unmap(this, 1);
13229  }
13230  break;
13231  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13232  hAllocation->DedicatedAllocUnmap(this);
13233  break;
13234  default:
13235  VMA_ASSERT(0);
13236  }
13237 }
13238 
13239 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
13240 {
13241  VkResult res = VK_SUCCESS;
13242  switch(hAllocation->GetType())
13243  {
13244  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13245  res = GetVulkanFunctions().vkBindBufferMemory(
13246  m_hDevice,
13247  hBuffer,
13248  hAllocation->GetMemory(),
13249  0); //memoryOffset
13250  break;
13251  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13252  {
13253  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13254  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
13255  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
13256  break;
13257  }
13258  default:
13259  VMA_ASSERT(0);
13260  }
13261  return res;
13262 }
13263 
13264 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
13265 {
13266  VkResult res = VK_SUCCESS;
13267  switch(hAllocation->GetType())
13268  {
13269  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13270  res = GetVulkanFunctions().vkBindImageMemory(
13271  m_hDevice,
13272  hImage,
13273  hAllocation->GetMemory(),
13274  0); //memoryOffset
13275  break;
13276  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13277  {
13278  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13279  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13280  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13281  break;
13282  }
13283  default:
13284  VMA_ASSERT(0);
13285  }
13286  return res;
13287 }
13288 
13289 void VmaAllocator_T::FlushOrInvalidateAllocation(
13290  VmaAllocation hAllocation,
13291  VkDeviceSize offset, VkDeviceSize size,
13292  VMA_CACHE_OPERATION op)
13293 {
13294  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
13295  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
13296  {
13297  const VkDeviceSize allocationSize = hAllocation->GetSize();
13298  VMA_ASSERT(offset <= allocationSize);
13299 
13300  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13301 
13302  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13303  memRange.memory = hAllocation->GetMemory();
13304 
13305  switch(hAllocation->GetType())
13306  {
13307  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13308  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13309  if(size == VK_WHOLE_SIZE)
13310  {
13311  memRange.size = allocationSize - memRange.offset;
13312  }
13313  else
13314  {
13315  VMA_ASSERT(offset + size <= allocationSize);
13316  memRange.size = VMA_MIN(
13317  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
13318  allocationSize - memRange.offset);
13319  }
13320  break;
13321 
13322  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13323  {
13324  // 1. Still within this allocation.
13325  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13326  if(size == VK_WHOLE_SIZE)
13327  {
13328  size = allocationSize - offset;
13329  }
13330  else
13331  {
13332  VMA_ASSERT(offset + size <= allocationSize);
13333  }
13334  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
13335 
13336  // 2. Adjust to whole block.
13337  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
13338  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
13339  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
13340  memRange.offset += allocationOffset;
13341  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
13342 
13343  break;
13344  }
13345 
13346  default:
13347  VMA_ASSERT(0);
13348  }
13349 
13350  switch(op)
13351  {
13352  case VMA_CACHE_FLUSH:
13353  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
13354  break;
13355  case VMA_CACHE_INVALIDATE:
13356  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
13357  break;
13358  default:
13359  VMA_ASSERT(0);
13360  }
13361  }
13362  // else: Just ignore this call.
13363 }
13364 
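// Worked example of the rounding above, assuming nonCoherentAtomSize = 64:
// a request with offset = 100, size = 200 becomes
//   memRange.offset = VmaAlignDown(100, 64) = 64,
//   memRange.size   = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256,
// so the flushed range [64, 320) covers the requested [100, 300) and is then
// clamped to the end of the allocation (dedicated case) or of the whole block.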
13365 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13366 {
13367  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13368 
13369  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13370  {
13371  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13372  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13373  VMA_ASSERT(pDedicatedAllocations);
13374  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13375  VMA_ASSERT(success);
13376  }
13377 
13378  VkDeviceMemory hMemory = allocation->GetMemory();
13379 
13380  /*
13381  There is no need to call this, because the Vulkan spec allows skipping
13382  vkUnmapMemory before vkFreeMemory.
13383 
13384  if(allocation->GetMappedData() != VMA_NULL)
13385  {
13386  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13387  }
13388  */
13389 
13390  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13391 
13392  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13393 }
13394 
13395 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13396 {
13397  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13398  !hAllocation->CanBecomeLost() &&
13399  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13400  {
13401  void* pData = VMA_NULL;
13402  VkResult res = Map(hAllocation, &pData);
13403  if(res == VK_SUCCESS)
13404  {
13405  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13406  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13407  Unmap(hAllocation);
13408  }
13409  else
13410  {
13411  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13412  }
13413  }
13414 }
13415 
13416 #if VMA_STATS_STRING_ENABLED
13417 
13418 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
13419 {
13420  bool dedicatedAllocationsStarted = false;
13421  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13422  {
13423  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13424  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
13425  VMA_ASSERT(pDedicatedAllocVector);
13426  if(pDedicatedAllocVector->empty() == false)
13427  {
13428  if(dedicatedAllocationsStarted == false)
13429  {
13430  dedicatedAllocationsStarted = true;
13431  json.WriteString("DedicatedAllocations");
13432  json.BeginObject();
13433  }
13434 
13435  json.BeginString("Type ");
13436  json.ContinueString(memTypeIndex);
13437  json.EndString();
13438 
13439  json.BeginArray();
13440 
13441  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
13442  {
13443  json.BeginObject(true);
13444  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
13445  hAlloc->PrintParameters(json);
13446  json.EndObject();
13447  }
13448 
13449  json.EndArray();
13450  }
13451  }
13452  if(dedicatedAllocationsStarted)
13453  {
13454  json.EndObject();
13455  }
13456 
13457  {
13458  bool allocationsStarted = false;
13459  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13460  {
13461  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
13462  {
13463  if(allocationsStarted == false)
13464  {
13465  allocationsStarted = true;
13466  json.WriteString("DefaultPools");
13467  json.BeginObject();
13468  }
13469 
13470  json.BeginString("Type ");
13471  json.ContinueString(memTypeIndex);
13472  json.EndString();
13473 
13474  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
13475  }
13476  }
13477  if(allocationsStarted)
13478  {
13479  json.EndObject();
13480  }
13481  }
13482 
13483  // Custom pools
13484  {
13485  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13486  const size_t poolCount = m_Pools.size();
13487  if(poolCount > 0)
13488  {
13489  json.WriteString("Pools");
13490  json.BeginObject();
13491  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13492  {
13493  json.BeginString();
13494  json.ContinueString(m_Pools[poolIndex]->GetId());
13495  json.EndString();
13496 
13497  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
13498  }
13499  json.EndObject();
13500  }
13501  }
13502 }
13503 
13504 #endif // #if VMA_STATS_STRING_ENABLED
13505 
13506 ////////////////////////////////////////////////////////////////////////////////
13507 // Public interface
13508 
13509 VkResult vmaCreateAllocator(
13510  const VmaAllocatorCreateInfo* pCreateInfo,
13511  VmaAllocator* pAllocator)
13512 {
13513  VMA_ASSERT(pCreateInfo && pAllocator);
13514  VMA_DEBUG_LOG("vmaCreateAllocator");
13515  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13516  return (*pAllocator)->Init(pCreateInfo);
13517 }
13518 
13519 void vmaDestroyAllocator(
13520  VmaAllocator allocator)
13521 {
13522  if(allocator != VK_NULL_HANDLE)
13523  {
13524  VMA_DEBUG_LOG("vmaDestroyAllocator");
13525  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13526  vma_delete(&allocationCallbacks, allocator);
13527  }
13528 }
13529 
13530 void vmaGetPhysicalDeviceProperties(
13531  VmaAllocator allocator,
13532  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13533 {
13534  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13535  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13536 }
13537 
13538 void vmaGetMemoryProperties(
13539  VmaAllocator allocator,
13540  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13541 {
13542  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13543  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13544 }
13545 
13546 void vmaGetMemoryTypeProperties(
13547  VmaAllocator allocator,
13548  uint32_t memoryTypeIndex,
13549  VkMemoryPropertyFlags* pFlags)
13550 {
13551  VMA_ASSERT(allocator && pFlags);
13552  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13553  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13554 }
13555 
13556 void vmaSetCurrentFrameIndex(
13557  VmaAllocator allocator,
13558  uint32_t frameIndex)
13559 {
13560  VMA_ASSERT(allocator);
13561  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13562 
13563  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13564 
13565  allocator->SetCurrentFrameIndex(frameIndex);
13566 }
13567 
13568 void vmaCalculateStats(
13569  VmaAllocator allocator,
13570  VmaStats* pStats)
13571 {
13572  VMA_ASSERT(allocator && pStats);
13573  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13574  allocator->CalculateStats(pStats);
13575 }
13576 
13577 #if VMA_STATS_STRING_ENABLED
13578 
13579 void vmaBuildStatsString(
13580  VmaAllocator allocator,
13581  char** ppStatsString,
13582  VkBool32 detailedMap)
13583 {
13584  VMA_ASSERT(allocator && ppStatsString);
13585  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13586 
13587  VmaStringBuilder sb(allocator);
13588  {
13589  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13590  json.BeginObject();
13591 
13592  VmaStats stats;
13593  allocator->CalculateStats(&stats);
13594 
13595  json.WriteString("Total");
13596  VmaPrintStatInfo(json, stats.total);
13597 
13598  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13599  {
13600  json.BeginString("Heap ");
13601  json.ContinueString(heapIndex);
13602  json.EndString();
13603  json.BeginObject();
13604 
13605  json.WriteString("Size");
13606  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13607 
13608  json.WriteString("Flags");
13609  json.BeginArray(true);
13610  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13611  {
13612  json.WriteString("DEVICE_LOCAL");
13613  }
13614  json.EndArray();
13615 
13616  if(stats.memoryHeap[heapIndex].blockCount > 0)
13617  {
13618  json.WriteString("Stats");
13619  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13620  }
13621 
13622  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13623  {
13624  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13625  {
13626  json.BeginString("Type ");
13627  json.ContinueString(typeIndex);
13628  json.EndString();
13629 
13630  json.BeginObject();
13631 
13632  json.WriteString("Flags");
13633  json.BeginArray(true);
13634  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13635  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13636  {
13637  json.WriteString("DEVICE_LOCAL");
13638  }
13639  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13640  {
13641  json.WriteString("HOST_VISIBLE");
13642  }
13643  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13644  {
13645  json.WriteString("HOST_COHERENT");
13646  }
13647  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13648  {
13649  json.WriteString("HOST_CACHED");
13650  }
13651  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13652  {
13653  json.WriteString("LAZILY_ALLOCATED");
13654  }
13655  json.EndArray();
13656 
13657  if(stats.memoryType[typeIndex].blockCount > 0)
13658  {
13659  json.WriteString("Stats");
13660  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13661  }
13662 
13663  json.EndObject();
13664  }
13665  }
13666 
13667  json.EndObject();
13668  }
13669  if(detailedMap == VK_TRUE)
13670  {
13671  allocator->PrintDetailedMap(json);
13672  }
13673 
13674  json.EndObject();
13675  }
13676 
13677  const size_t len = sb.GetLength();
13678  char* const pChars = vma_new_array(allocator, char, len + 1);
13679  if(len > 0)
13680  {
13681  memcpy(pChars, sb.GetData(), len);
13682  }
13683  pChars[len] = '\0';
13684  *ppStatsString = pChars;
13685 }
13686 
13687 void vmaFreeStatsString(
13688  VmaAllocator allocator,
13689  char* pStatsString)
13690 {
13691  if(pStatsString != VMA_NULL)
13692  {
13693  VMA_ASSERT(allocator);
13694  size_t len = strlen(pStatsString);
13695  vma_delete_array(allocator, pStatsString, len + 1);
13696  }
13697 }
13698 
13699 #endif // #if VMA_STATS_STRING_ENABLED
13700 
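#if VMA_STATS_STRING_ENABLED
// Example: the typical pairing of the two functions above, as a sketch. How the
// returned JSON string is consumed (logged, written to a file) is up to the caller.
static void ExampleDumpStatsJson(VmaAllocator allocator)
{
    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE /*detailedMap*/);
    // ... pass statsString to a logger or write it to disk here ...
    vmaFreeStatsString(allocator, statsString);
}
#endif // #if VMA_STATS_STRING_ENABLED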
13701 /*
13702 This function is not protected by any mutex because it just reads immutable data.
13703 */
13704 VkResult vmaFindMemoryTypeIndex(
13705  VmaAllocator allocator,
13706  uint32_t memoryTypeBits,
13707  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13708  uint32_t* pMemoryTypeIndex)
13709 {
13710  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13711  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13712  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13713 
13714  if(pAllocationCreateInfo->memoryTypeBits != 0)
13715  {
13716  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13717  }
13718 
13719  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13720  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13721 
13722  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13723  if(mapped)
13724  {
13725  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13726  }
13727 
13728  // Convert usage to requiredFlags and preferredFlags.
13729  switch(pAllocationCreateInfo->usage)
13730  {
13731  case VMA_MEMORY_USAGE_UNKNOWN:
13732  break;
13733  case VMA_MEMORY_USAGE_GPU_ONLY:
13734  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13735  {
13736  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13737  }
13738  break;
13739  case VMA_MEMORY_USAGE_CPU_ONLY:
13740  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13741  break;
13742  case VMA_MEMORY_USAGE_CPU_TO_GPU:
13743  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13744  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13745  {
13746  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13747  }
13748  break;
13749  case VMA_MEMORY_USAGE_GPU_TO_CPU:
13750  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13751  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13752  break;
13753  default:
13754  break;
13755  }
13756 
13757  *pMemoryTypeIndex = UINT32_MAX;
13758  uint32_t minCost = UINT32_MAX;
13759  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13760  memTypeIndex < allocator->GetMemoryTypeCount();
13761  ++memTypeIndex, memTypeBit <<= 1)
13762  {
13763  // This memory type is acceptable according to memoryTypeBits bitmask.
13764  if((memTypeBit & memoryTypeBits) != 0)
13765  {
13766  const VkMemoryPropertyFlags currFlags =
13767  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13768  // This memory type contains requiredFlags.
13769  if((requiredFlags & ~currFlags) == 0)
13770  {
13771  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13772  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13773  // Remember memory type with lowest cost.
13774  if(currCost < minCost)
13775  {
13776  *pMemoryTypeIndex = memTypeIndex;
13777  if(currCost == 0)
13778  {
13779  return VK_SUCCESS;
13780  }
13781  minCost = currCost;
13782  }
13783  }
13784  }
13785  }
13786  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13787 }
13788 
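// Example: a sketch of calling the cost-based search above to pick a memory type
// for a staging buffer. `memoryTypeBits` would normally come from
// vkGetBufferMemoryRequirements (or from the two helpers below).
static VkResult ExampleFindStagingMemoryType(
    VmaAllocator allocator, uint32_t memoryTypeBits, uint32_t* pMemoryTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    // Per the switch above, CPU_ONLY requires HOST_VISIBLE | HOST_COHERENT.
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    return vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, pMemoryTypeIndex);
}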
13789 VkResult vmaFindMemoryTypeIndexForBufferInfo(
13790  VmaAllocator allocator,
13791  const VkBufferCreateInfo* pBufferCreateInfo,
13792  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13793  uint32_t* pMemoryTypeIndex)
13794 {
13795  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13796  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13797  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13798  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13799 
13800  const VkDevice hDev = allocator->m_hDevice;
13801  VkBuffer hBuffer = VK_NULL_HANDLE;
13802  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13803  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13804  if(res == VK_SUCCESS)
13805  {
13806  VkMemoryRequirements memReq = {};
13807  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13808  hDev, hBuffer, &memReq);
13809 
13810  res = vmaFindMemoryTypeIndex(
13811  allocator,
13812  memReq.memoryTypeBits,
13813  pAllocationCreateInfo,
13814  pMemoryTypeIndex);
13815 
13816  allocator->GetVulkanFunctions().vkDestroyBuffer(
13817  hDev, hBuffer, allocator->GetAllocationCallbacks());
13818  }
13819  return res;
13820 }
13821 
13822 VkResult vmaFindMemoryTypeIndexForImageInfo(
13823  VmaAllocator allocator,
13824  const VkImageCreateInfo* pImageCreateInfo,
13825  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13826  uint32_t* pMemoryTypeIndex)
13827 {
13828  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13829  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13830  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13831  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13832 
13833  const VkDevice hDev = allocator->m_hDevice;
13834  VkImage hImage = VK_NULL_HANDLE;
13835  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13836  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13837  if(res == VK_SUCCESS)
13838  {
13839  VkMemoryRequirements memReq = {};
13840  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13841  hDev, hImage, &memReq);
13842 
13843  res = vmaFindMemoryTypeIndex(
13844  allocator,
13845  memReq.memoryTypeBits,
13846  pAllocationCreateInfo,
13847  pMemoryTypeIndex);
13848 
13849  allocator->GetVulkanFunctions().vkDestroyImage(
13850  hDev, hImage, allocator->GetAllocationCallbacks());
13851  }
13852  return res;
13853 }
13854 
13855 VkResult vmaCreatePool(
13856  VmaAllocator allocator,
13857  const VmaPoolCreateInfo* pCreateInfo,
13858  VmaPool* pPool)
13859 {
13860  VMA_ASSERT(allocator && pCreateInfo && pPool);
13861 
13862  VMA_DEBUG_LOG("vmaCreatePool");
13863 
13864  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13865 
13866  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13867 
13868 #if VMA_RECORDING_ENABLED
13869  if(allocator->GetRecorder() != VMA_NULL)
13870  {
13871  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13872  }
13873 #endif
13874 
13875  return res;
13876 }
13877 
13878 void vmaDestroyPool(
13879  VmaAllocator allocator,
13880  VmaPool pool)
13881 {
13882  VMA_ASSERT(allocator);
13883 
13884  if(pool == VK_NULL_HANDLE)
13885  {
13886  return;
13887  }
13888 
13889  VMA_DEBUG_LOG("vmaDestroyPool");
13890 
13891  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13892 
13893 #if VMA_RECORDING_ENABLED
13894  if(allocator->GetRecorder() != VMA_NULL)
13895  {
13896  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13897  }
13898 #endif
13899 
13900  allocator->DestroyPool(pool);
13901 }
13902 
13903 void vmaGetPoolStats(
13904  VmaAllocator allocator,
13905  VmaPool pool,
13906  VmaPoolStats* pPoolStats)
13907 {
13908  VMA_ASSERT(allocator && pool && pPoolStats);
13909 
13910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13911 
13912  allocator->GetPoolStats(pool, pPoolStats);
13913 }
13914 
13915 void vmaMakePoolAllocationsLost(
13916  VmaAllocator allocator,
13917  VmaPool pool,
13918  size_t* pLostAllocationCount)
13919 {
13920  VMA_ASSERT(allocator && pool);
13921 
13922  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13923 
13924 #if VMA_RECORDING_ENABLED
13925  if(allocator->GetRecorder() != VMA_NULL)
13926  {
13927  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13928  }
13929 #endif
13930 
13931  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13932 }
13933 
13934 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13935 {
13936  VMA_ASSERT(allocator && pool);
13937 
13938  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13939 
13940  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13941 
13942  return allocator->CheckPoolCorruption(pool);
13943 }
13944 
13945 VkResult vmaAllocateMemory(
13946  VmaAllocator allocator,
13947  const VkMemoryRequirements* pVkMemoryRequirements,
13948  const VmaAllocationCreateInfo* pCreateInfo,
13949  VmaAllocation* pAllocation,
13950  VmaAllocationInfo* pAllocationInfo)
13951 {
13952  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13953 
13954  VMA_DEBUG_LOG("vmaAllocateMemory");
13955 
13956  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13957 
13958  VkResult result = allocator->AllocateMemory(
13959  *pVkMemoryRequirements,
13960  false, // requiresDedicatedAllocation
13961  false, // prefersDedicatedAllocation
13962  VK_NULL_HANDLE, // dedicatedBuffer
13963  VK_NULL_HANDLE, // dedicatedImage
13964  *pCreateInfo,
13965  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13966  pAllocation);
13967 
13968 #if VMA_RECORDING_ENABLED
13969  if(allocator->GetRecorder() != VMA_NULL)
13970  {
13971  allocator->GetRecorder()->RecordAllocateMemory(
13972  allocator->GetCurrentFrameIndex(),
13973  *pVkMemoryRequirements,
13974  *pCreateInfo,
13975  *pAllocation);
13976  }
13977 #endif
13978 
13979  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13980  {
13981  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13982  }
13983 
13984  return result;
13985 }
13986 
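// Example: raw allocation through the function above, with made-up
// VkMemoryRequirements; real values come from vkGet*MemoryRequirements.
static VkResult ExampleAllocateRaw(VmaAllocator allocator, VmaAllocation* pAllocation)
{
    VkMemoryRequirements memReq = {};
    memReq.size = 65536;
    memReq.alignment = 256;
    memReq.memoryTypeBits = UINT32_MAX; // Accept any memory type (assumption).

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    // pAllocationInfo may be null - see the VMA_NULL check above.
    return vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, pAllocation, VMA_NULL);
}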
13987 VkResult vmaAllocateMemoryForBuffer(
13988  VmaAllocator allocator,
13989  VkBuffer buffer,
13990  const VmaAllocationCreateInfo* pCreateInfo,
13991  VmaAllocation* pAllocation,
13992  VmaAllocationInfo* pAllocationInfo)
13993 {
13994  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13995 
13996  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
13997 
13998  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13999 
14000  VkMemoryRequirements vkMemReq = {};
14001  bool requiresDedicatedAllocation = false;
14002  bool prefersDedicatedAllocation = false;
14003  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
14004  requiresDedicatedAllocation,
14005  prefersDedicatedAllocation);
14006 
14007  VkResult result = allocator->AllocateMemory(
14008  vkMemReq,
14009  requiresDedicatedAllocation,
14010  prefersDedicatedAllocation,
14011  buffer, // dedicatedBuffer
14012  VK_NULL_HANDLE, // dedicatedImage
14013  *pCreateInfo,
14014  VMA_SUBALLOCATION_TYPE_BUFFER,
14015  pAllocation);
14016 
14017 #if VMA_RECORDING_ENABLED
14018  if(allocator->GetRecorder() != VMA_NULL)
14019  {
14020  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
14021  allocator->GetCurrentFrameIndex(),
14022  vkMemReq,
14023  requiresDedicatedAllocation,
14024  prefersDedicatedAllocation,
14025  *pCreateInfo,
14026  *pAllocation);
14027  }
14028 #endif
14029 
14030  if(pAllocationInfo && result == VK_SUCCESS)
14031  {
14032  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14033  }
14034 
14035  return result;
14036 }
14037 
14038 VkResult vmaAllocateMemoryForImage(
14039  VmaAllocator allocator,
14040  VkImage image,
14041  const VmaAllocationCreateInfo* pCreateInfo,
14042  VmaAllocation* pAllocation,
14043  VmaAllocationInfo* pAllocationInfo)
14044 {
14045  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
14046 
14047  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
14048 
14049  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14050 
14051  VkMemoryRequirements vkMemReq = {};
14052  bool requiresDedicatedAllocation = false;
14053  bool prefersDedicatedAllocation = false;
14054  allocator->GetImageMemoryRequirements(image, vkMemReq,
14055  requiresDedicatedAllocation, prefersDedicatedAllocation);
14056 
14057  VkResult result = allocator->AllocateMemory(
14058  vkMemReq,
14059  requiresDedicatedAllocation,
14060  prefersDedicatedAllocation,
14061  VK_NULL_HANDLE, // dedicatedBuffer
14062  image, // dedicatedImage
14063  *pCreateInfo,
14064  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
14065  pAllocation);
14066 
14067 #if VMA_RECORDING_ENABLED
14068  if(allocator->GetRecorder() != VMA_NULL)
14069  {
14070  allocator->GetRecorder()->RecordAllocateMemoryForImage(
14071  allocator->GetCurrentFrameIndex(),
14072  vkMemReq,
14073  requiresDedicatedAllocation,
14074  prefersDedicatedAllocation,
14075  *pCreateInfo,
14076  *pAllocation);
14077  }
14078 #endif
14079 
14080  if(pAllocationInfo && result == VK_SUCCESS)
14081  {
14082  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14083  }
14084 
14085  return result;
14086 }
14087 
14088 void vmaFreeMemory(
14089  VmaAllocator allocator,
14090  VmaAllocation allocation)
14091 {
14092  VMA_ASSERT(allocator);
14093 
14094  if(allocation == VK_NULL_HANDLE)
14095  {
14096  return;
14097  }
14098 
14099  VMA_DEBUG_LOG("vmaFreeMemory");
14100 
14101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14102 
14103 #if VMA_RECORDING_ENABLED
14104  if(allocator->GetRecorder() != VMA_NULL)
14105  {
14106  allocator->GetRecorder()->RecordFreeMemory(
14107  allocator->GetCurrentFrameIndex(),
14108  allocation);
14109  }
14110 #endif
14111 
14112  allocator->FreeMemory(allocation);
14113 }
14114 
14115 VkResult vmaResizeAllocation(
14116  VmaAllocator allocator,
14117  VmaAllocation allocation,
14118  VkDeviceSize newSize)
14119 {
14120  VMA_ASSERT(allocator && allocation);
14121 
14122  VMA_DEBUG_LOG("vmaResizeAllocation");
14123 
14124  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14125 
14126 #if VMA_RECORDING_ENABLED
14127  if(allocator->GetRecorder() != VMA_NULL)
14128  {
14129  allocator->GetRecorder()->RecordResizeAllocation(
14130  allocator->GetCurrentFrameIndex(),
14131  allocation,
14132  newSize);
14133  }
14134 #endif
14135 
14136  return allocator->ResizeAllocation(allocation, newSize);
14137 }
14138 
14139 void vmaGetAllocationInfo(
14140  VmaAllocator allocator,
14141  VmaAllocation allocation,
14142  VmaAllocationInfo* pAllocationInfo)
14143 {
14144  VMA_ASSERT(allocator && allocation && pAllocationInfo);
14145 
14146  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14147 
14148 #if VMA_RECORDING_ENABLED
14149  if(allocator->GetRecorder() != VMA_NULL)
14150  {
14151  allocator->GetRecorder()->RecordGetAllocationInfo(
14152  allocator->GetCurrentFrameIndex(),
14153  allocation);
14154  }
14155 #endif
14156 
14157  allocator->GetAllocationInfo(allocation, pAllocationInfo);
14158 }
14159 
14160 VkBool32 vmaTouchAllocation(
14161  VmaAllocator allocator,
14162  VmaAllocation allocation)
14163 {
14164  VMA_ASSERT(allocator && allocation);
14165 
14166  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14167 
14168 #if VMA_RECORDING_ENABLED
14169  if(allocator->GetRecorder() != VMA_NULL)
14170  {
14171  allocator->GetRecorder()->RecordTouchAllocation(
14172  allocator->GetCurrentFrameIndex(),
14173  allocation);
14174  }
14175 #endif
14176 
14177  return allocator->TouchAllocation(allocation);
14178 }
14179 
14180 void vmaSetAllocationUserData(
14181  VmaAllocator allocator,
14182  VmaAllocation allocation,
14183  void* pUserData)
14184 {
14185  VMA_ASSERT(allocator && allocation);
14186 
14187  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14188 
14189  allocation->SetUserData(allocator, pUserData);
14190 
14191 #if VMA_RECORDING_ENABLED
14192  if(allocator->GetRecorder() != VMA_NULL)
14193  {
14194  allocator->GetRecorder()->RecordSetAllocationUserData(
14195  allocator->GetCurrentFrameIndex(),
14196  allocation,
14197  pUserData);
14198  }
14199 #endif
14200 }
14201 
14202 void vmaCreateLostAllocation(
14203  VmaAllocator allocator,
14204  VmaAllocation* pAllocation)
14205 {
14206  VMA_ASSERT(allocator && pAllocation);
14207 
14208  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14209 
14210  allocator->CreateLostAllocation(pAllocation);
14211 
14212 #if VMA_RECORDING_ENABLED
14213  if(allocator->GetRecorder() != VMA_NULL)
14214  {
14215  allocator->GetRecorder()->RecordCreateLostAllocation(
14216  allocator->GetCurrentFrameIndex(),
14217  *pAllocation);
14218  }
14219 #endif
14220 }
14221 
14222 VkResult vmaMapMemory(
14223  VmaAllocator allocator,
14224  VmaAllocation allocation,
14225  void** ppData)
14226 {
14227  VMA_ASSERT(allocator && allocation && ppData);
14228 
14229  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14230 
14231  VkResult res = allocator->Map(allocation, ppData);
14232 
14233 #if VMA_RECORDING_ENABLED
14234  if(allocator->GetRecorder() != VMA_NULL)
14235  {
14236  allocator->GetRecorder()->RecordMapMemory(
14237  allocator->GetCurrentFrameIndex(),
14238  allocation);
14239  }
14240 #endif
14241 
14242  return res;
14243 }
14244 
14245 void vmaUnmapMemory(
14246  VmaAllocator allocator,
14247  VmaAllocation allocation)
14248 {
14249  VMA_ASSERT(allocator && allocation);
14250 
14251  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14252 
14253 #if VMA_RECORDING_ENABLED
14254  if(allocator->GetRecorder() != VMA_NULL)
14255  {
14256  allocator->GetRecorder()->RecordUnmapMemory(
14257  allocator->GetCurrentFrameIndex(),
14258  allocation);
14259  }
14260 #endif
14261 
14262  allocator->Unmap(allocation);
14263 }
14264 
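// Example: the canonical pairing of the two functions above - map, write, flush,
// unmap. A sketch only; assumes the allocation is host-visible and holds at least
// `srcSize` bytes.
static VkResult ExampleWriteToAllocation(
    VmaAllocator allocator, VmaAllocation alloc, const void* srcData, size_t srcSize)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }
    memcpy(pData, srcData, srcSize);
    // A no-op for HOST_COHERENT memory; required for non-coherent types
    // (see FlushOrInvalidateAllocation above).
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, alloc);
    return VK_SUCCESS;
}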
14265 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14266 {
14267  VMA_ASSERT(allocator && allocation);
14268 
14269  VMA_DEBUG_LOG("vmaFlushAllocation");
14270 
14271  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14272 
14273  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
14274 
14275 #if VMA_RECORDING_ENABLED
14276  if(allocator->GetRecorder() != VMA_NULL)
14277  {
14278  allocator->GetRecorder()->RecordFlushAllocation(
14279  allocator->GetCurrentFrameIndex(),
14280  allocation, offset, size);
14281  }
14282 #endif
14283 }
14284 
14285 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14286 {
14287  VMA_ASSERT(allocator && allocation);
14288 
14289  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14290 
14291  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14292 
14293  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14294 
14295 #if VMA_RECORDING_ENABLED
14296  if(allocator->GetRecorder() != VMA_NULL)
14297  {
14298  allocator->GetRecorder()->RecordInvalidateAllocation(
14299  allocator->GetCurrentFrameIndex(),
14300  allocation, offset, size);
14301  }
14302 #endif
14303 }
14304 
14305 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14306 {
14307  VMA_ASSERT(allocator);
14308 
14309  VMA_DEBUG_LOG("vmaCheckCorruption");
14310 
14311  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14312 
14313  return allocator->CheckCorruption(memoryTypeBits);
14314 }
14315 
14316 VkResult vmaDefragment(
14317  VmaAllocator allocator,
14318  VmaAllocation* pAllocations,
14319  size_t allocationCount,
14320  VkBool32* pAllocationsChanged,
14321  const VmaDefragmentationInfo *pDefragmentationInfo,
14322  VmaDefragmentationStats* pDefragmentationStats)
14323 {
14324  VMA_ASSERT(allocator && pAllocations);
14325 
14326  VMA_DEBUG_LOG("vmaDefragment");
14327 
14328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14329 
14330  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14331 }
14332 
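// Example: a defragmentation call sketch. `allocs` are allocations the caller
// allows to be moved; both budgets below are illustrative, not recommendations.
static VkResult ExampleDefragment(VmaAllocator allocator, VmaAllocation* allocs, size_t allocCount)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = 64ull * 1024 * 1024; // Move at most 64 MiB total.
    defragInfo.maxAllocationsToMove = 128;           // And at most 128 allocations.

    VmaDefragmentationStats stats = {};
    // pAllocationsChanged is optional; pass an array of VkBool32 (one per
    // allocation) to learn which ones were actually moved.
    return vmaDefragment(allocator, allocs, allocCount, VMA_NULL, &defragInfo, &stats);
}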
14333 VkResult vmaBindBufferMemory(
14334  VmaAllocator allocator,
14335  VmaAllocation allocation,
14336  VkBuffer buffer)
14337 {
14338  VMA_ASSERT(allocator && allocation && buffer);
14339 
14340  VMA_DEBUG_LOG("vmaBindBufferMemory");
14341 
14342  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14343 
14344  return allocator->BindBufferMemory(allocation, buffer);
14345 }
14346 
14347 VkResult vmaBindImageMemory(
14348  VmaAllocator allocator,
14349  VmaAllocation allocation,
14350  VkImage image)
14351 {
14352  VMA_ASSERT(allocator && allocation && image);
14353 
14354  VMA_DEBUG_LOG("vmaBindImageMemory");
14355 
14356  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14357 
14358  return allocator->BindImageMemory(allocation, image);
14359 }
14360 
14361 VkResult vmaCreateBuffer(
14362  VmaAllocator allocator,
14363  const VkBufferCreateInfo* pBufferCreateInfo,
14364  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14365  VkBuffer* pBuffer,
14366  VmaAllocation* pAllocation,
14367  VmaAllocationInfo* pAllocationInfo)
14368 {
14369  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14370 
14371  if(pBufferCreateInfo->size == 0)
14372  {
14373  return VK_ERROR_VALIDATION_FAILED_EXT;
14374  }
14375 
14376  VMA_DEBUG_LOG("vmaCreateBuffer");
14377 
14378  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14379 
14380  *pBuffer = VK_NULL_HANDLE;
14381  *pAllocation = VK_NULL_HANDLE;
14382 
14383  // 1. Create VkBuffer.
14384  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14385  allocator->m_hDevice,
14386  pBufferCreateInfo,
14387  allocator->GetAllocationCallbacks(),
14388  pBuffer);
14389  if(res >= 0)
14390  {
14391  // 2. vkGetBufferMemoryRequirements.
14392  VkMemoryRequirements vkMemReq = {};
14393  bool requiresDedicatedAllocation = false;
14394  bool prefersDedicatedAllocation = false;
14395  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14396  requiresDedicatedAllocation, prefersDedicatedAllocation);
14397 
14398  // Make sure alignment requirements for specific buffer usages reported
14399  // in Physical Device Properties are included in alignment reported by memory requirements.
14400  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14401  {
14402  VMA_ASSERT(vkMemReq.alignment %
14403  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14404  }
14405  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14406  {
14407  VMA_ASSERT(vkMemReq.alignment %
14408  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14409  }
14410  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14411  {
14412  VMA_ASSERT(vkMemReq.alignment %
14413  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14414  }
14415 
14416  // 3. Allocate memory using allocator.
14417  res = allocator->AllocateMemory(
14418  vkMemReq,
14419  requiresDedicatedAllocation,
14420  prefersDedicatedAllocation,
14421  *pBuffer, // dedicatedBuffer
14422  VK_NULL_HANDLE, // dedicatedImage
14423  *pAllocationCreateInfo,
14424  VMA_SUBALLOCATION_TYPE_BUFFER,
14425  pAllocation);
14426 
14427 #if VMA_RECORDING_ENABLED
14428  if(allocator->GetRecorder() != VMA_NULL)
14429  {
14430  allocator->GetRecorder()->RecordCreateBuffer(
14431  allocator->GetCurrentFrameIndex(),
14432  *pBufferCreateInfo,
14433  *pAllocationCreateInfo,
14434  *pAllocation);
14435  }
14436 #endif
14437 
14438  if(res >= 0)
14439  {
14440  // 4. Bind buffer with memory.
14441  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14442  if(res >= 0)
14443  {
14444  // All steps succeeded.
14445  #if VMA_STATS_STRING_ENABLED
14446  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14447  #endif
14448  if(pAllocationInfo != VMA_NULL)
14449  {
14450  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14451  }
14452 
14453  return VK_SUCCESS;
14454  }
14455  allocator->FreeMemory(*pAllocation);
14456  *pAllocation = VK_NULL_HANDLE;
14457  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14458  *pBuffer = VK_NULL_HANDLE;
14459  return res;
14460  }
14461  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14462  *pBuffer = VK_NULL_HANDLE;
14463  return res;
14464  }
14465  return res;
14466 }
14467 
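// Example: end-to-end use of vmaCreateBuffer above for a GPU-only vertex buffer.
// Size and usage flags are illustrative.
static VkResult ExampleCreateVertexBuffer(
    VmaAllocator allocator, VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 1024 * 1024; // Must be nonzero, see the validation above.
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    // Creates the buffer, allocates memory and binds them in one call; on failure
    // the cleanup paths above leave *pBuffer and *pAllocation as VK_NULL_HANDLE.
    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, pBuffer, pAllocation, VMA_NULL);
}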
14468 void vmaDestroyBuffer(
14469  VmaAllocator allocator,
14470  VkBuffer buffer,
14471  VmaAllocation allocation)
14472 {
14473  VMA_ASSERT(allocator);
14474 
14475  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14476  {
14477  return;
14478  }
14479 
14480  VMA_DEBUG_LOG("vmaDestroyBuffer");
14481 
14482  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14483 
14484 #if VMA_RECORDING_ENABLED
14485  if(allocator->GetRecorder() != VMA_NULL)
14486  {
14487  allocator->GetRecorder()->RecordDestroyBuffer(
14488  allocator->GetCurrentFrameIndex(),
14489  allocation);
14490  }
14491 #endif
14492 
14493  if(buffer != VK_NULL_HANDLE)
14494  {
14495  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14496  }
14497 
14498  if(allocation != VK_NULL_HANDLE)
14499  {
14500  allocator->FreeMemory(allocation);
14501  }
14502 }
14503 
14504 VkResult vmaCreateImage(
14505  VmaAllocator allocator,
14506  const VkImageCreateInfo* pImageCreateInfo,
14507  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14508  VkImage* pImage,
14509  VmaAllocation* pAllocation,
14510  VmaAllocationInfo* pAllocationInfo)
14511 {
14512  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14513 
14514  if(pImageCreateInfo->extent.width == 0 ||
14515  pImageCreateInfo->extent.height == 0 ||
14516  pImageCreateInfo->extent.depth == 0 ||
14517  pImageCreateInfo->mipLevels == 0 ||
14518  pImageCreateInfo->arrayLayers == 0)
14519  {
14520  return VK_ERROR_VALIDATION_FAILED_EXT;
14521  }
14522 
14523  VMA_DEBUG_LOG("vmaCreateImage");
14524 
14525  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14526 
14527  *pImage = VK_NULL_HANDLE;
14528  *pAllocation = VK_NULL_HANDLE;
14529 
14530  // 1. Create VkImage.
14531  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14532  allocator->m_hDevice,
14533  pImageCreateInfo,
14534  allocator->GetAllocationCallbacks(),
14535  pImage);
14536  if(res >= 0)
14537  {
14538  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14539  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14540  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14541 
14542  // 2. Allocate memory using allocator.
14543  VkMemoryRequirements vkMemReq = {};
14544  bool requiresDedicatedAllocation = false;
14545  bool prefersDedicatedAllocation = false;
14546  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14547  requiresDedicatedAllocation, prefersDedicatedAllocation);
14548 
14549  res = allocator->AllocateMemory(
14550  vkMemReq,
14551  requiresDedicatedAllocation,
14552  prefersDedicatedAllocation,
14553  VK_NULL_HANDLE, // dedicatedBuffer
14554  *pImage, // dedicatedImage
14555  *pAllocationCreateInfo,
14556  suballocType,
14557  pAllocation);
14558 
14559 #if VMA_RECORDING_ENABLED
14560  if(allocator->GetRecorder() != VMA_NULL)
14561  {
14562  allocator->GetRecorder()->RecordCreateImage(
14563  allocator->GetCurrentFrameIndex(),
14564  *pImageCreateInfo,
14565  *pAllocationCreateInfo,
14566  *pAllocation);
14567  }
14568 #endif
14569 
14570  if(res >= 0)
14571  {
14572  // 3. Bind image with memory.
14573  res = allocator->BindImageMemory(*pAllocation, *pImage);
14574  if(res >= 0)
14575  {
14576  // All steps succeeded.
14577  #if VMA_STATS_STRING_ENABLED
14578  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14579  #endif
14580  if(pAllocationInfo != VMA_NULL)
14581  {
14582  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14583  }
14584 
14585  return VK_SUCCESS;
14586  }
14587  allocator->FreeMemory(*pAllocation);
14588  *pAllocation = VK_NULL_HANDLE;
14589  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14590  *pImage = VK_NULL_HANDLE;
14591  return res;
14592  }
14593  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14594  *pImage = VK_NULL_HANDLE;
14595  return res;
14596  }
14597  return res;
14598 }
14599 
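// Example: the matching sketch for vmaCreateImage above - a sampled 2D texture in
// optimal tiling, so suballocType above becomes IMAGE_OPTIMAL. Format and usage
// are illustrative.
static VkResult ExampleCreateTexture(
    VmaAllocator allocator, uint32_t width, uint32_t height,
    VkImage* pImage, VmaAllocation* pAllocation)
{
    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { width, height, 1 };
    imgCreateInfo.mipLevels = 1;   // Must be nonzero, see the validation above.
    imgCreateInfo.arrayLayers = 1; // Likewise.
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, pImage, pAllocation, VMA_NULL);
}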
14600 void vmaDestroyImage(
14601  VmaAllocator allocator,
14602  VkImage image,
14603  VmaAllocation allocation)
14604 {
14605  VMA_ASSERT(allocator);
14606 
14607  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14608  {
14609  return;
14610  }
14611 
14612  VMA_DEBUG_LOG("vmaDestroyImage");
14613 
14614  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14615 
14616 #if VMA_RECORDING_ENABLED
14617  if(allocator->GetRecorder() != VMA_NULL)
14618  {
14619  allocator->GetRecorder()->RecordDestroyImage(
14620  allocator->GetCurrentFrameIndex(),
14621  allocation);
14622  }
14623 #endif
14624 
14625  if(image != VK_NULL_HANDLE)
14626  {
14627  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14628  }
14629  if(allocation != VK_NULL_HANDLE)
14630  {
14631  allocator->FreeMemory(allocation);
14632  }
14633 }
14634 
14635 #endif // #ifdef VMA_IMPLEMENTATION
+Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1479 /*
1480 Define this macro to 0/1 to disable/enable support for recording functionality,
1481 available through VmaAllocatorCreateInfo::pRecordSettings.
1482 */
1483 #ifndef VMA_RECORDING_ENABLED
1484  #ifdef _WIN32
1485  #define VMA_RECORDING_ENABLED 1
1486  #else
1487  #define VMA_RECORDING_ENABLED 0
1488  #endif
1489 #endif
1490 
1491 #ifndef NOMINMAX
1492  #define NOMINMAX // For windows.h
1493 #endif
1494 
1495 #include <vulkan/vulkan.h>
1496 
1497 #if VMA_RECORDING_ENABLED
1498  #include <windows.h>
1499 #endif
1500 
1501 #if !defined(VMA_DEDICATED_ALLOCATION)
1502  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1503  #define VMA_DEDICATED_ALLOCATION 1
1504  #else
1505  #define VMA_DEDICATED_ALLOCATION 0
1506  #endif
1507 #endif
1508 
1518 VK_DEFINE_HANDLE(VmaAllocator)
1519 
1520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1522  VmaAllocator allocator,
1523  uint32_t memoryType,
1524  VkDeviceMemory memory,
1525  VkDeviceSize size);
1527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1528  VmaAllocator allocator,
1529  uint32_t memoryType,
1530  VkDeviceMemory memory,
1531  VkDeviceSize size);
1532 
1546 
1576 
1579 typedef VkFlags VmaAllocatorCreateFlags;
1580 
1585 typedef struct VmaVulkanFunctions {
1586  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1587  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1588  PFN_vkAllocateMemory vkAllocateMemory;
1589  PFN_vkFreeMemory vkFreeMemory;
1590  PFN_vkMapMemory vkMapMemory;
1591  PFN_vkUnmapMemory vkUnmapMemory;
1592  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1593  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1594  PFN_vkBindBufferMemory vkBindBufferMemory;
1595  PFN_vkBindImageMemory vkBindImageMemory;
1596  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1597  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1598  PFN_vkCreateBuffer vkCreateBuffer;
1599  PFN_vkDestroyBuffer vkDestroyBuffer;
1600  PFN_vkCreateImage vkCreateImage;
1601  PFN_vkDestroyImage vkDestroyImage;
1602 #if VMA_DEDICATED_ALLOCATION
1603  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1604  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1605 #endif
1607 
1609 typedef enum VmaRecordFlagBits {
1616 
1619 typedef VkFlags VmaRecordFlags;
1620 
1622 typedef struct VmaRecordSettings
1623 {
1633  const char* pFilePath;
1635 
1638 {
1642 
1643  VkPhysicalDevice physicalDevice;
1645 
1646  VkDevice device;
1648 
1651 
1652  const VkAllocationCallbacks* pAllocationCallbacks;
1654 
1693  const VkDeviceSize* pHeapSizeLimit;
1714 
1716 VkResult vmaCreateAllocator(
1717  const VmaAllocatorCreateInfo* pCreateInfo,
1718  VmaAllocator* pAllocator);
1719 
1721 void vmaDestroyAllocator(
1722  VmaAllocator allocator);
1723 
1729  VmaAllocator allocator,
1730  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1731 
1737  VmaAllocator allocator,
1738  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1739 
1747  VmaAllocator allocator,
1748  uint32_t memoryTypeIndex,
1749  VkMemoryPropertyFlags* pFlags);
1750 
1760  VmaAllocator allocator,
1761  uint32_t frameIndex);
1762 
1765 typedef struct VmaStatInfo
1766 {
1768  uint32_t blockCount;
1774  VkDeviceSize usedBytes;
1776  VkDeviceSize unusedBytes;
1779 } VmaStatInfo;
1780 
1782 typedef struct VmaStats
1783 {
1784  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1785  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1787 } VmaStats;
1788 
1790 void vmaCalculateStats(
1791  VmaAllocator allocator,
1792  VmaStats* pStats);
1793 
1794 #define VMA_STATS_STRING_ENABLED 1
1795 
1796 #if VMA_STATS_STRING_ENABLED
1797 
1799 
1801 void vmaBuildStatsString(
1802  VmaAllocator allocator,
1803  char** ppStatsString,
1804  VkBool32 detailedMap);
1805 
1806 void vmaFreeStatsString(
1807  VmaAllocator allocator,
1808  char* pStatsString);
1809 
1810 #endif // #if VMA_STATS_STRING_ENABLED
1811 
1820 VK_DEFINE_HANDLE(VmaPool)
1821 
1822 typedef enum VmaMemoryUsage
1823 {
1872 } VmaMemoryUsage;
1873 
1888 
1943 
1959 
1969 
1976 
1980 
1982 {
1995  VkMemoryPropertyFlags requiredFlags;
2000  VkMemoryPropertyFlags preferredFlags;
2008  uint32_t memoryTypeBits;
2021  void* pUserData;
2023 
2040 VkResult vmaFindMemoryTypeIndex(
2041  VmaAllocator allocator,
2042  uint32_t memoryTypeBits,
2043  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2044  uint32_t* pMemoryTypeIndex);
2045 
2059  VmaAllocator allocator,
2060  const VkBufferCreateInfo* pBufferCreateInfo,
2061  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2062  uint32_t* pMemoryTypeIndex);
2063 
2077  VmaAllocator allocator,
2078  const VkImageCreateInfo* pImageCreateInfo,
2079  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2080  uint32_t* pMemoryTypeIndex);
2081 
2102 
2119 
2130 
2136 
2139 typedef VkFlags VmaPoolCreateFlags;
2140 
2143 typedef struct VmaPoolCreateInfo {
2158  VkDeviceSize blockSize;
2187 
2190 typedef struct VmaPoolStats {
2193  VkDeviceSize size;
2196  VkDeviceSize unusedSize;
2209  VkDeviceSize unusedRangeSizeMax;
2212  size_t blockCount;
2213 } VmaPoolStats;
2214 
2221 VkResult vmaCreatePool(
2222  VmaAllocator allocator,
2223  const VmaPoolCreateInfo* pCreateInfo,
2224  VmaPool* pPool);
2225 
2228 void vmaDestroyPool(
2229  VmaAllocator allocator,
2230  VmaPool pool);
2231 
2238 void vmaGetPoolStats(
2239  VmaAllocator allocator,
2240  VmaPool pool,
2241  VmaPoolStats* pPoolStats);
2242 
2250  VmaAllocator allocator,
2251  VmaPool pool,
2252  size_t* pLostAllocationCount);
2253 
2268 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2269 
2294 VK_DEFINE_HANDLE(VmaAllocation)
2295 
2296 
2298 typedef struct VmaAllocationInfo {
2303  uint32_t memoryType;
2312  VkDeviceMemory deviceMemory;
2317  VkDeviceSize offset;
2322  VkDeviceSize size;
2336  void* pUserData;
2337 } VmaAllocationInfo;
2338 
2349 VkResult vmaAllocateMemory(
2350  VmaAllocator allocator,
2351  const VkMemoryRequirements* pVkMemoryRequirements,
2352  const VmaAllocationCreateInfo* pCreateInfo,
2353  VmaAllocation* pAllocation,
2354  VmaAllocationInfo* pAllocationInfo);
2355 
2362 VkResult vmaAllocateMemoryForBuffer(
2363  VmaAllocator allocator,
2364  VkBuffer buffer,
2365  const VmaAllocationCreateInfo* pCreateInfo,
2366  VmaAllocation* pAllocation,
2367  VmaAllocationInfo* pAllocationInfo);
2368 
2370 VkResult vmaAllocateMemoryForImage(
2371  VmaAllocator allocator,
2372  VkImage image,
2373  const VmaAllocationCreateInfo* pCreateInfo,
2374  VmaAllocation* pAllocation,
2375  VmaAllocationInfo* pAllocationInfo);
2376 
2378 void vmaFreeMemory(
2379  VmaAllocator allocator,
2380  VmaAllocation allocation);
2381 
2402 VkResult vmaResizeAllocation(
2403  VmaAllocator allocator,
2404  VmaAllocation allocation,
2405  VkDeviceSize newSize);
2406 
2423 void vmaGetAllocationInfo(
2424  VmaAllocator allocator,
2425  VmaAllocation allocation,
2426  VmaAllocationInfo* pAllocationInfo);
2427 
2442 VkBool32 vmaTouchAllocation(
2443  VmaAllocator allocator,
2444  VmaAllocation allocation);
2445 
2459 void vmaSetAllocationUserData(
2460  VmaAllocator allocator,
2461  VmaAllocation allocation,
2462  void* pUserData);
2463 
2474 void vmaCreateLostAllocation(
2475  VmaAllocator allocator,
2476  VmaAllocation* pAllocation);
2477 
2512 VkResult vmaMapMemory(
2513  VmaAllocator allocator,
2514  VmaAllocation allocation,
2515  void** ppData);
2516 
2521 void vmaUnmapMemory(
2522  VmaAllocator allocator,
2523  VmaAllocation allocation);
2524 
2537 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2538 
2551 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2552 
2569 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2570 
2572 typedef struct VmaDefragmentationInfo {
2577  VkDeviceSize maxBytesToMove;
2583 } VmaDefragmentationInfo;
2584 
2586 typedef struct VmaDefragmentationStats {
2588  VkDeviceSize bytesMoved;
2590  VkDeviceSize bytesFreed;
2595 } VmaDefragmentationStats;
2596 
2635 VkResult vmaDefragment(
2636  VmaAllocator allocator,
2637  VmaAllocation* pAllocations,
2638  size_t allocationCount,
2639  VkBool32* pAllocationsChanged,
2640  const VmaDefragmentationInfo *pDefragmentationInfo,
2641  VmaDefragmentationStats* pDefragmentationStats);
2642 
2655 VkResult vmaBindBufferMemory(
2656  VmaAllocator allocator,
2657  VmaAllocation allocation,
2658  VkBuffer buffer);
2659 
2672 VkResult vmaBindImageMemory(
2673  VmaAllocator allocator,
2674  VmaAllocation allocation,
2675  VkImage image);
2676 
2703 VkResult vmaCreateBuffer(
2704  VmaAllocator allocator,
2705  const VkBufferCreateInfo* pBufferCreateInfo,
2706  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2707  VkBuffer* pBuffer,
2708  VmaAllocation* pAllocation,
2709  VmaAllocationInfo* pAllocationInfo);
2710 
2722 void vmaDestroyBuffer(
2723  VmaAllocator allocator,
2724  VkBuffer buffer,
2725  VmaAllocation allocation);
2726 
2728 VkResult vmaCreateImage(
2729  VmaAllocator allocator,
2730  const VkImageCreateInfo* pImageCreateInfo,
2731  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2732  VkImage* pImage,
2733  VmaAllocation* pAllocation,
2734  VmaAllocationInfo* pAllocationInfo);
2735 
2747 void vmaDestroyImage(
2748  VmaAllocator allocator,
2749  VkImage image,
2750  VmaAllocation allocation);
2751 
2752 #ifdef __cplusplus
2753 }
2754 #endif
2755 
2756 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2757 
2758 // For Visual Studio IntelliSense.
2759 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2760 #define VMA_IMPLEMENTATION
2761 #endif
2762 
2763 #ifdef VMA_IMPLEMENTATION
2764 #undef VMA_IMPLEMENTATION
2765 
2766 #include <cstdint>
2767 #include <cstdlib>
2768 #include <cstring>
2769 
2770 /*******************************************************************************
2771 CONFIGURATION SECTION
2772 
2773 Define some of these macros before each #include of this header or change them
2774 here if you need behavior other than the default for your environment.
2775 */
2776 
2777 /*
2778 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2779 internally, like:
2780 
2781  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2782 
2783 Define to 0 if you are going to provide your own pointers to Vulkan functions via
2784 VmaAllocatorCreateInfo::pVulkanFunctions.
2785 */
2786 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2787 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2788 #endif
2789 
2790 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2791 //#define VMA_USE_STL_CONTAINERS 1
2792 
2793 /* Set this macro to 1 to make the library include and use STL containers:
2794 std::pair, std::vector, std::list, std::unordered_map.
2795 
2796 Set it to 0 or leave it undefined to make the library use its own
2797 implementation of the containers.
2798 */
2799 #if VMA_USE_STL_CONTAINERS
2800  #define VMA_USE_STL_VECTOR 1
2801  #define VMA_USE_STL_UNORDERED_MAP 1
2802  #define VMA_USE_STL_LIST 1
2803 #endif
2804 
2805 #if VMA_USE_STL_VECTOR
2806  #include <vector>
2807 #endif
2808 
2809 #if VMA_USE_STL_UNORDERED_MAP
2810  #include <unordered_map>
2811 #endif
2812 
2813 #if VMA_USE_STL_LIST
2814  #include <list>
2815 #endif
2816 
2817 /*
2818 The following headers are used only in this CONFIGURATION section, so feel
2819 free to remove them if they are not needed.
2820 */
2821 #include <cassert> // for assert
2822 #include <algorithm> // for min, max
2823 #include <mutex> // for std::mutex
2824 #include <atomic> // for std::atomic
2825 
2826 #ifndef VMA_NULL
2827  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2828  #define VMA_NULL nullptr
2829 #endif
2830 
2831 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
2832 #include <cstdlib>
2833 void *aligned_alloc(size_t alignment, size_t size)
2834 {
2835  // alignment must be >= sizeof(void*)
2836  if(alignment < sizeof(void*))
2837  {
2838  alignment = sizeof(void*);
2839  }
2840 
2841  return memalign(alignment, size);
2842 }
2843 #elif defined(__APPLE__) || defined(__ANDROID__)
2844 #include <cstdlib>
2845 void *aligned_alloc(size_t alignment, size_t size)
2846 {
2847  // alignment must be >= sizeof(void*)
2848  if(alignment < sizeof(void*))
2849  {
2850  alignment = sizeof(void*);
2851  }
2852 
2853  void *pointer;
2854  if(posix_memalign(&pointer, alignment, size) == 0)
2855  return pointer;
2856  return VMA_NULL;
2857 }
2858 #endif
2859 
2860 // If your compiler is not compatible with C++11 and the definition of the
2861 // aligned_alloc() function is missing, uncommenting the following line may help:
2862 
2863 //#include <malloc.h>
2864 
2865 // Normal assert to check for programmer's errors, especially in Debug configuration.
2866 #ifndef VMA_ASSERT
2867  #ifdef _DEBUG
2868  #define VMA_ASSERT(expr) assert(expr)
2869  #else
2870  #define VMA_ASSERT(expr)
2871  #endif
2872 #endif
2873 
2874 // Assert that will be called very often, like inside data structures e.g. operator[].
2875 // Making it non-empty can make the program slow.
2876 #ifndef VMA_HEAVY_ASSERT
2877  #ifdef _DEBUG
2878  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2879  #else
2880  #define VMA_HEAVY_ASSERT(expr)
2881  #endif
2882 #endif
2883 
2884 #ifndef VMA_ALIGN_OF
2885  #define VMA_ALIGN_OF(type) (__alignof(type))
2886 #endif
2887 
2888 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2889  #if defined(_WIN32)
2890  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2891  #else
2892  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2893  #endif
2894 #endif
2895 
2896 #ifndef VMA_SYSTEM_FREE
2897  #if defined(_WIN32)
2898  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2899  #else
2900  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2901  #endif
2902 #endif
2903 
2904 #ifndef VMA_MIN
2905  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2906 #endif
2907 
2908 #ifndef VMA_MAX
2909  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2910 #endif
2911 
2912 #ifndef VMA_SWAP
2913  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2914 #endif
2915 
2916 #ifndef VMA_SORT
2917  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2918 #endif
2919 
2920 #ifndef VMA_DEBUG_LOG
2921  #define VMA_DEBUG_LOG(format, ...)
2922  /*
2923  #define VMA_DEBUG_LOG(format, ...) do { \
2924  printf(format, __VA_ARGS__); \
2925  printf("\n"); \
2926  } while(false)
2927  */
2928 #endif
2929 
2930 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2931 #if VMA_STATS_STRING_ENABLED
2932  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2933  {
2934  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2935  }
2936  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2937  {
2938  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2939  }
2940  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2941  {
2942  snprintf(outStr, strLen, "%p", ptr);
2943  }
2944 #endif
2945 
2946 #ifndef VMA_MUTEX
2947  class VmaMutex
2948  {
2949  public:
2950  VmaMutex() { }
2951  ~VmaMutex() { }
2952  void Lock() { m_Mutex.lock(); }
2953  void Unlock() { m_Mutex.unlock(); }
2954  private:
2955  std::mutex m_Mutex;
2956  };
2957  #define VMA_MUTEX VmaMutex
2958 #endif
2959 
2960 /*
2961 If providing your own implementation, you need to implement a subset of std::atomic:
2962 
2963 - Constructor(uint32_t desired)
2964 - uint32_t load() const
2965 - void store(uint32_t desired)
2966 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2967 */
2968 #ifndef VMA_ATOMIC_UINT32
2969  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2970 #endif
2971 
2972 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2973 
2977  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2978 #endif
2979 
2980 #ifndef VMA_DEBUG_ALIGNMENT
2981 
2985  #define VMA_DEBUG_ALIGNMENT (1)
2986 #endif
2987 
2988 #ifndef VMA_DEBUG_MARGIN
2989 
2993  #define VMA_DEBUG_MARGIN (0)
2994 #endif
2995 
2996 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2997 
3001  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3002 #endif
3003 
3004 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3005 
3010  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3011 #endif
3012 
3013 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3014 
3018  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3019 #endif
3020 
3021 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3022 
3026  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3027 #endif
3028 
3029 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3030  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3032 #endif
3033 
3034 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3035  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3037 #endif
3038 
3039 #ifndef VMA_CLASS_NO_COPY
3040  #define VMA_CLASS_NO_COPY(className) \
3041  private: \
3042  className(const className&) = delete; \
3043  className& operator=(const className&) = delete;
3044 #endif
3045 
3046 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3047 
3048 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3049 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3050 
3051 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3052 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3053 
3054 /*******************************************************************************
3055 END OF CONFIGURATION
3056 */
3057 
3058 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3059  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3060 
3061 // Returns number of bits set to 1 in (v).
3062 static inline uint32_t VmaCountBitsSet(uint32_t v)
3063 {
3064  uint32_t c = v - ((v >> 1) & 0x55555555);
3065  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3066  c = ((c >> 4) + c) & 0x0F0F0F0F;
3067  c = ((c >> 8) + c) & 0x00FF00FF;
3068  c = ((c >> 16) + c) & 0x0000FFFF;
3069  return c;
3070 }
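// Worked example (illustrative sketch, not part of the original source, with
// a hypothetical helper name): 0xB is binary 1011, which has three set bits.
// The shifts above fold neighboring bit-pair, nibble, byte and halfword sums
// in parallel, the classic SWAR population count.
static inline void VmaCountBitsSetExample()
{
    VMA_ASSERT(VmaCountBitsSet(0xB) == 3);         // 1011 -> 3
    VMA_ASSERT(VmaCountBitsSet(0) == 0);           // no bits set
    VMA_ASSERT(VmaCountBitsSet(UINT32_MAX) == 32); // all bits set
}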
3071 
3072 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
3073 // Use types like uint32_t, uint64_t as T.
3074 template <typename T>
3075 static inline T VmaAlignUp(T val, T align)
3076 {
3077  return (val + align - 1) / align * align;
3078 }
3079 // Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
3080 // Use types like uint32_t, uint64_t as T.
3081 template <typename T>
3082 static inline T VmaAlignDown(T val, T align)
3083 {
3084  return val / align * align;
3085 }
3086 
3087 // Division with mathematical rounding to nearest number.
3088 template <typename T>
3089 static inline T VmaRoundDiv(T x, T y)
3090 {
3091  return (x + (y / (T)2)) / y;
3092 }
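// Worked examples (illustrative sketch, hypothetical helper name): VmaAlignUp
// rounds up to the next multiple of align, VmaAlignDown truncates to the
// previous one, and VmaRoundDiv rounds the quotient to the nearest integer.
static inline void VmaAlignExamples()
{
    VMA_ASSERT(VmaAlignUp<uint32_t>(11, 8) == 16);
    VMA_ASSERT(VmaAlignDown<uint32_t>(11, 8) == 8);
    VMA_ASSERT(VmaAlignUp<uint32_t>(16, 8) == 16); // already aligned: unchanged
    VMA_ASSERT(VmaRoundDiv<uint32_t>(7, 2) == 4);  // 3.5 rounds up to 4
    VMA_ASSERT(VmaRoundDiv<uint32_t>(5, 2) == 3);  // 2.5 rounds up to 3
}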
3093 
3094 /*
3095 Returns true if the given number is a power of two.
3096 T must be an unsigned integer, or a signed integer that is always nonnegative.
3097 Note that this returns true for 0.
3098 */
3099 template <typename T>
3100 inline bool VmaIsPow2(T x)
3101 {
3102  return (x & (x-1)) == 0;
3103 }
3104 
3105 // Returns the smallest power of 2 greater than or equal to v.
3106 static inline uint32_t VmaNextPow2(uint32_t v)
3107 {
3108  v--;
3109  v |= v >> 1;
3110  v |= v >> 2;
3111  v |= v >> 4;
3112  v |= v >> 8;
3113  v |= v >> 16;
3114  v++;
3115  return v;
3116 }
3117 static inline uint64_t VmaNextPow2(uint64_t v)
3118 {
3119  v--;
3120  v |= v >> 1;
3121  v |= v >> 2;
3122  v |= v >> 4;
3123  v |= v >> 8;
3124  v |= v >> 16;
3125  v |= v >> 32;
3126  v++;
3127  return v;
3128 }
3129 
3130 // Returns the largest power of 2 less than or equal to v.
3131 static inline uint32_t VmaPrevPow2(uint32_t v)
3132 {
3133  v |= v >> 1;
3134  v |= v >> 2;
3135  v |= v >> 4;
3136  v |= v >> 8;
3137  v |= v >> 16;
3138  v = v ^ (v >> 1);
3139  return v;
3140 }
3141 static inline uint64_t VmaPrevPow2(uint64_t v)
3142 {
3143  v |= v >> 1;
3144  v |= v >> 2;
3145  v |= v >> 4;
3146  v |= v >> 8;
3147  v |= v >> 16;
3148  v |= v >> 32;
3149  v = v ^ (v >> 1);
3150  return v;
3151 }
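// Worked examples (illustrative): the shift cascade smears the highest set
// bit into every lower position, so for v = 100 (binary 1100100) the filled
// value is 127. Therefore VmaNextPow2(100) == 128 and VmaNextPow2(128) == 128
// (the initial v-- keeps exact powers of two unchanged), while
// VmaPrevPow2(100) == 64 and VmaPrevPow2(128) == 128.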
3152 
3153 static inline bool VmaStrIsEmpty(const char* pStr)
3154 {
3155  return pStr == VMA_NULL || *pStr == '\0';
3156 }
3157 
3158 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3159 {
3160  switch(algorithm)
3161  {
3162  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3163  return "Linear";
3164  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3165  return "Buddy";
3166  case 0:
3167  return "Default";
3168  default:
3169  VMA_ASSERT(0);
3170  return "";
3171  }
3172 }
3173 
3174 #ifndef VMA_SORT
3175 
3176 template<typename Iterator, typename Compare>
3177 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3178 {
3179  Iterator centerValue = end; --centerValue;
3180  Iterator insertIndex = beg;
3181  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3182  {
3183  if(cmp(*memTypeIndex, *centerValue))
3184  {
3185  if(insertIndex != memTypeIndex)
3186  {
3187  VMA_SWAP(*memTypeIndex, *insertIndex);
3188  }
3189  ++insertIndex;
3190  }
3191  }
3192  if(insertIndex != centerValue)
3193  {
3194  VMA_SWAP(*insertIndex, *centerValue);
3195  }
3196  return insertIndex;
3197 }
3198 
3199 template<typename Iterator, typename Compare>
3200 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3201 {
3202  if(beg < end)
3203  {
3204  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3205  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3206  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3207  }
3208 }
3209 
3210 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3211 
3212 #endif // #ifndef VMA_SORT
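// Usage sketch (illustrative): this fallback takes the same arguments as
// std::sort (a random-access range plus a strict weak ordering comparator),
// so callers of VMA_SORT compile identically with either implementation. The
// pivot is the last element (Lomuto-style partition), which keeps the code
// dependency-free at the cost of worst-case O(n^2) on already sorted input.
//
//     uint32_t sizes[] = { 64, 16, 256, 4 };
//     VMA_SORT(sizes, sizes + 4, [](uint32_t a, uint32_t b) { return a < b; });
//     // sizes == { 4, 16, 64, 256 }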
3213 
3214 /*
3215 Returns true if two memory blocks occupy overlapping pages.
3216 ResourceA must be at a lower memory offset than ResourceB.
3217 
3218 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3219 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3220 */
3221 static inline bool VmaBlocksOnSamePage(
3222  VkDeviceSize resourceAOffset,
3223  VkDeviceSize resourceASize,
3224  VkDeviceSize resourceBOffset,
3225  VkDeviceSize pageSize)
3226 {
3227  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3228  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3229  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3230  VkDeviceSize resourceBStart = resourceBOffset;
3231  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3232  return resourceAEndPage == resourceBStartPage;
3233 }
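// Worked example (illustrative; pageSize must be a power of two for the bit
// masking above to be valid): with pageSize = 1024, a resource at offset 0
// and size 100 ends on page 0, and a resource starting at offset 512 also
// begins on page 0, so the blocks share a page and the function returns true.
// With pageSize = 64 the first resource ends on page 64 (its last byte is 99)
// while the second begins on page 512, so the function returns false.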
3234 
3235 enum VmaSuballocationType
3236 {
3237  VMA_SUBALLOCATION_TYPE_FREE = 0,
3238  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3239  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3240  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3241  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3242  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3243  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3244 };
3245 
3246 /*
3247 Returns true if given suballocation types could conflict and must respect
3248 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3249 or a linear image and the other is an optimal image. If a type is unknown, the
3250 function behaves conservatively.
3251 */
3252 static inline bool VmaIsBufferImageGranularityConflict(
3253  VmaSuballocationType suballocType1,
3254  VmaSuballocationType suballocType2)
3255 {
3256  if(suballocType1 > suballocType2)
3257  {
3258  VMA_SWAP(suballocType1, suballocType2);
3259  }
3260 
3261  switch(suballocType1)
3262  {
3263  case VMA_SUBALLOCATION_TYPE_FREE:
3264  return false;
3265  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3266  return true;
3267  case VMA_SUBALLOCATION_TYPE_BUFFER:
3268  return
3269  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3271  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3272  return
3273  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3274  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3275  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3276  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3277  return
3278  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3279  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3280  return false;
3281  default:
3282  VMA_ASSERT(0);
3283  return true;
3284  }
3285 }
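// Example (illustrative): a buffer next to an optimal-tiling image conflicts,
// but a buffer next to a linear image does not. Argument order is irrelevant
// because the two types are swapped into canonical order first.
//
//     VmaIsBufferImageGranularityConflict(VMA_SUBALLOCATION_TYPE_BUFFER,
//         VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL); // true
//     VmaIsBufferImageGranularityConflict(VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR,
//         VMA_SUBALLOCATION_TYPE_BUFFER);        // false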
3286 
3287 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3288 {
3289  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3290  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3291  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3292  {
3293  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3294  }
3295 }
3296 
3297 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3298 {
3299  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3300  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3301  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3302  {
3303  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3304  {
3305  return false;
3306  }
3307  }
3308  return true;
3309 }
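// Usage sketch (illustrative; only meaningful when VMA_DEBUG_MARGIN is
// defined to a nonzero multiple of 4, e.g. 16, otherwise numberCount is 0
// and validation trivially succeeds):
//
//     char block[64] = {};
//     VmaWriteMagicValue(block, 16);              // fill the margin at offset 16
//     bool ok = VmaValidateMagicValue(block, 16); // true: margin intact
//     block[18] = 0;                              // simulate a buffer overrun
//     ok = VmaValidateMagicValue(block, 16);      // false: corruption detected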
3310 
3311 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3312 struct VmaMutexLock
3313 {
3314  VMA_CLASS_NO_COPY(VmaMutexLock)
3315 public:
3316  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3317  m_pMutex(useMutex ? &mutex : VMA_NULL)
3318  {
3319  if(m_pMutex)
3320  {
3321  m_pMutex->Lock();
3322  }
3323  }
3324 
3325  ~VmaMutexLock()
3326  {
3327  if(m_pMutex)
3328  {
3329  m_pMutex->Unlock();
3330  }
3331  }
3332 
3333 private:
3334  VMA_MUTEX* m_pMutex;
3335 };
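// Usage sketch (illustrative, with hypothetical members m_Mutex/m_UseMutex):
// passing useMutex == false turns the lock into a no-op, which is how the
// library can honor external synchronization without branching at every call
// site.
//
//     {
//         VmaMutexLock lock(m_Mutex, m_UseMutex); // locks only if m_UseMutex
//         // ... critical section ...
//     } // unlocked automatically at end of scope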
3336 
3337 #if VMA_DEBUG_GLOBAL_MUTEX
3338  static VMA_MUTEX gDebugGlobalMutex;
3339  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3340 #else
3341  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3342 #endif
3343 
3344 // Minimum size of a free suballocation to register it in the free suballocation collection.
3345 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3346 
3347 /*
3348 Performs a binary search and returns an iterator to the first element that is
3349 greater than or equal to (key), according to comparison (cmp).
3350 
3351 Cmp should return true if the first argument is less than the second.
3352 
3353 The returned value is the found element, if present in the collection, or the
3354 place where a new element with value (key) should be inserted.
3355 */
3356 template <typename CmpLess, typename IterT, typename KeyT>
3357 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3358 {
3359  size_t down = 0, up = (end - beg);
3360  while(down < up)
3361  {
3362  const size_t mid = (down + up) / 2;
3363  if(cmp(*(beg+mid), key))
3364  {
3365  down = mid + 1;
3366  }
3367  else
3368  {
3369  up = mid;
3370  }
3371  }
3372  return beg + down;
3373 }
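// Usage sketch (illustrative, hypothetical comparator type): behaves like
// std::lower_bound over a sorted range.
//
//     struct UintLess { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
//     const uint32_t arr[] = { 1, 3, 3, 7 };
//     const uint32_t* it = VmaBinaryFindFirstNotLess(arr, arr + 4, 3u, UintLess());
//     // it - arr == 1: the first 3. Searching for 4u would yield index 3,
//     // the correct insertion point for a new element.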
3374 
3376 // Memory allocation
3377 
3378 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3379 {
3380  if((pAllocationCallbacks != VMA_NULL) &&
3381  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3382  {
3383  return (*pAllocationCallbacks->pfnAllocation)(
3384  pAllocationCallbacks->pUserData,
3385  size,
3386  alignment,
3387  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3388  }
3389  else
3390  {
3391  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3392  }
3393 }
3394 
3395 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3396 {
3397  if((pAllocationCallbacks != VMA_NULL) &&
3398  (pAllocationCallbacks->pfnFree != VMA_NULL))
3399  {
3400  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3401  }
3402  else
3403  {
3404  VMA_SYSTEM_FREE(ptr);
3405  }
3406 }
3407 
3408 template<typename T>
3409 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3410 {
3411  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3412 }
3413 
3414 template<typename T>
3415 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3416 {
3417  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3418 }
3419 
3420 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3421 
3422 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3423 
3424 template<typename T>
3425 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3426 {
3427  ptr->~T();
3428  VmaFree(pAllocationCallbacks, ptr);
3429 }
3430 
3431 template<typename T>
3432 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3433 {
3434  if(ptr != VMA_NULL)
3435  {
3436  for(size_t i = count; i--; )
3437  {
3438  ptr[i].~T();
3439  }
3440  VmaFree(pAllocationCallbacks, ptr);
3441  }
3442 }
3443 
3444 // STL-compatible allocator.
3445 template<typename T>
3446 class VmaStlAllocator
3447 {
3448 public:
3449  const VkAllocationCallbacks* const m_pCallbacks;
3450  typedef T value_type;
3451 
3452  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3453  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3454 
3455  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3456  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3457 
3458  template<typename U>
3459  bool operator==(const VmaStlAllocator<U>& rhs) const
3460  {
3461  return m_pCallbacks == rhs.m_pCallbacks;
3462  }
3463  template<typename U>
3464  bool operator!=(const VmaStlAllocator<U>& rhs) const
3465  {
3466  return m_pCallbacks != rhs.m_pCallbacks;
3467  }
3468 
3469  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3470 };
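// Usage sketch (illustrative; pAllocationCallbacks is a hypothetical variable
// and <vector> is assumed to be available): the allocator routes a standard
// container's allocations through the user-provided VkAllocationCallbacks
// instead of the global heap.
//
//     typedef std::vector<uint32_t, VmaStlAllocator<uint32_t> > UintVector;
//     UintVector v = UintVector(VmaStlAllocator<uint32_t>(pAllocationCallbacks));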
3471 
3472 #if VMA_USE_STL_VECTOR
3473 
3474 #define VmaVector std::vector
3475 
3476 template<typename T, typename allocatorT>
3477 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3478 {
3479  vec.insert(vec.begin() + index, item);
3480 }
3481 
3482 template<typename T, typename allocatorT>
3483 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3484 {
3485  vec.erase(vec.begin() + index);
3486 }
3487 
3488 #else // #if VMA_USE_STL_VECTOR
3489 
3490 /* Class with an interface compatible with a subset of std::vector.
3491 T must be POD because constructors and destructors are not called and memcpy is
3492 used for these objects. */
3493 template<typename T, typename AllocatorT>
3494 class VmaVector
3495 {
3496 public:
3497  typedef T value_type;
3498 
3499  VmaVector(const AllocatorT& allocator) :
3500  m_Allocator(allocator),
3501  m_pArray(VMA_NULL),
3502  m_Count(0),
3503  m_Capacity(0)
3504  {
3505  }
3506 
3507  VmaVector(size_t count, const AllocatorT& allocator) :
3508  m_Allocator(allocator),
3509  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3510  m_Count(count),
3511  m_Capacity(count)
3512  {
3513  }
3514 
3515  VmaVector(const VmaVector<T, AllocatorT>& src) :
3516  m_Allocator(src.m_Allocator),
3517  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3518  m_Count(src.m_Count),
3519  m_Capacity(src.m_Count)
3520  {
3521  if(m_Count != 0)
3522  {
3523  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3524  }
3525  }
3526 
3527  ~VmaVector()
3528  {
3529  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3530  }
3531 
3532  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3533  {
3534  if(&rhs != this)
3535  {
3536  resize(rhs.m_Count);
3537  if(m_Count != 0)
3538  {
3539  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3540  }
3541  }
3542  return *this;
3543  }
3544 
3545  bool empty() const { return m_Count == 0; }
3546  size_t size() const { return m_Count; }
3547  T* data() { return m_pArray; }
3548  const T* data() const { return m_pArray; }
3549 
3550  T& operator[](size_t index)
3551  {
3552  VMA_HEAVY_ASSERT(index < m_Count);
3553  return m_pArray[index];
3554  }
3555  const T& operator[](size_t index) const
3556  {
3557  VMA_HEAVY_ASSERT(index < m_Count);
3558  return m_pArray[index];
3559  }
3560 
3561  T& front()
3562  {
3563  VMA_HEAVY_ASSERT(m_Count > 0);
3564  return m_pArray[0];
3565  }
3566  const T& front() const
3567  {
3568  VMA_HEAVY_ASSERT(m_Count > 0);
3569  return m_pArray[0];
3570  }
3571  T& back()
3572  {
3573  VMA_HEAVY_ASSERT(m_Count > 0);
3574  return m_pArray[m_Count - 1];
3575  }
3576  const T& back() const
3577  {
3578  VMA_HEAVY_ASSERT(m_Count > 0);
3579  return m_pArray[m_Count - 1];
3580  }
3581 
3582  void reserve(size_t newCapacity, bool freeMemory = false)
3583  {
3584  newCapacity = VMA_MAX(newCapacity, m_Count);
3585 
3586  if((newCapacity < m_Capacity) && !freeMemory)
3587  {
3588  newCapacity = m_Capacity;
3589  }
3590 
3591  if(newCapacity != m_Capacity)
3592  {
3593  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3594  if(m_Count != 0)
3595  {
3596  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3597  }
3598  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3599  m_Capacity = newCapacity;
3600  m_pArray = newArray;
3601  }
3602  }
3603 
3604  void resize(size_t newCount, bool freeMemory = false)
3605  {
3606  size_t newCapacity = m_Capacity;
3607  if(newCount > m_Capacity)
3608  {
3609  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3610  }
3611  else if(freeMemory)
3612  {
3613  newCapacity = newCount;
3614  }
3615 
3616  if(newCapacity != m_Capacity)
3617  {
3618  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3619  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3620  if(elementsToCopy != 0)
3621  {
3622  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3623  }
3624  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3625  m_Capacity = newCapacity;
3626  m_pArray = newArray;
3627  }
3628 
3629  m_Count = newCount;
3630  }
3631 
3632  void clear(bool freeMemory = false)
3633  {
3634  resize(0, freeMemory);
3635  }
3636 
3637  void insert(size_t index, const T& src)
3638  {
3639  VMA_HEAVY_ASSERT(index <= m_Count);
3640  const size_t oldCount = size();
3641  resize(oldCount + 1);
3642  if(index < oldCount)
3643  {
3644  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3645  }
3646  m_pArray[index] = src;
3647  }
3648 
3649  void remove(size_t index)
3650  {
3651  VMA_HEAVY_ASSERT(index < m_Count);
3652  const size_t oldCount = size();
3653  if(index < oldCount - 1)
3654  {
3655  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3656  }
3657  resize(oldCount - 1);
3658  }
3659 
3660  void push_back(const T& src)
3661  {
3662  const size_t newIndex = size();
3663  resize(newIndex + 1);
3664  m_pArray[newIndex] = src;
3665  }
3666 
3667  void pop_back()
3668  {
3669  VMA_HEAVY_ASSERT(m_Count > 0);
3670  resize(size() - 1);
3671  }
3672 
3673  void push_front(const T& src)
3674  {
3675  insert(0, src);
3676  }
3677 
3678  void pop_front()
3679  {
3680  VMA_HEAVY_ASSERT(m_Count > 0);
3681  remove(0);
3682  }
3683 
3684  typedef T* iterator;
3685 
3686  iterator begin() { return m_pArray; }
3687  iterator end() { return m_pArray + m_Count; }
3688 
3689 private:
3690  AllocatorT m_Allocator;
3691  T* m_pArray;
3692  size_t m_Count;
3693  size_t m_Capacity;
3694 };
3695 
3696 template<typename T, typename allocatorT>
3697 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
3698 {
3699  vec.insert(index, item);
3700 }
3701 
3702 template<typename T, typename allocatorT>
3703 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
3704 {
3705  vec.remove(index);
3706 }
3707 
3708 #endif // #if VMA_USE_STL_VECTOR
3709 
3710 template<typename CmpLess, typename VectorT>
3711 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
3712 {
3713  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3714  vector.data(),
3715  vector.data() + vector.size(),
3716  value,
3717  CmpLess()) - vector.data();
3718  VmaVectorInsert(vector, indexToInsert, value);
3719  return indexToInsert;
3720 }
3721 
3722 template<typename CmpLess, typename VectorT>
3723 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
3724 {
3725  CmpLess comparator;
3726  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
3727  vector.begin(),
3728  vector.end(),
3729  value,
3730  comparator);
3731  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
3732  {
3733  size_t indexToRemove = it - vector.begin();
3734  VmaVectorRemove(vector, indexToRemove);
3735  return true;
3736  }
3737  return false;
3738 }
3739 
3740 template<typename CmpLess, typename IterT, typename KeyT>
3741 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3742 {
3743  CmpLess comparator;
3744  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3745  beg, end, value, comparator);
3746  if(it == end ||
3747  (!comparator(*it, value) && !comparator(value, *it)))
3748  {
3749  return it;
3750  }
3751  return end;
3752 }
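// Usage sketch (illustrative, hypothetical comparator and vector): these
// helpers keep a vector permanently sorted, so lookups stay O(log n).
//
//     struct UintLess { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
//     VmaVectorInsertSorted<UintLess>(vec, 7u); // vec == { 7 }
//     VmaVectorInsertSorted<UintLess>(vec, 3u); // vec == { 3, 7 }
//     VmaVectorRemoveSorted<UintLess>(vec, 7u); // vec == { 3 }, returns true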
3753 
3755 // class VmaPoolAllocator
3756 
3757 /*
3758 Allocator for objects of type T using a list of arrays (pools) to speed up
3759 allocation. The number of elements that can be allocated is not bounded,
3760 because the allocator can create multiple blocks.
3761 */
3762 template<typename T>
3763 class VmaPoolAllocator
3764 {
3765  VMA_CLASS_NO_COPY(VmaPoolAllocator)
3766 public:
3767  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
3768  ~VmaPoolAllocator();
3769  void Clear();
3770  T* Alloc();
3771  void Free(T* ptr);
3772 
3773 private:
3774  union Item
3775  {
3776  uint32_t NextFreeIndex;
3777  T Value;
3778  };
3779 
3780  struct ItemBlock
3781  {
3782  Item* pItems;
3783  uint32_t FirstFreeIndex;
3784  };
3785 
3786  const VkAllocationCallbacks* m_pAllocationCallbacks;
3787  size_t m_ItemsPerBlock;
3788  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
3789 
3790  ItemBlock& CreateNewBlock();
3791 };
3792 
3793 template<typename T>
3794 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
3795  m_pAllocationCallbacks(pAllocationCallbacks),
3796  m_ItemsPerBlock(itemsPerBlock),
3797  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
3798 {
3799  VMA_ASSERT(itemsPerBlock > 0);
3800 }
3801 
3802 template<typename T>
3803 VmaPoolAllocator<T>::~VmaPoolAllocator()
3804 {
3805  Clear();
3806 }
3807 
3808 template<typename T>
3809 void VmaPoolAllocator<T>::Clear()
3810 {
3811  for(size_t i = m_ItemBlocks.size(); i--; )
3812  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3813  m_ItemBlocks.clear();
3814 }
3815 
3816 template<typename T>
3817 T* VmaPoolAllocator<T>::Alloc()
3818 {
3819  for(size_t i = m_ItemBlocks.size(); i--; )
3820  {
3821  ItemBlock& block = m_ItemBlocks[i];
3822  // This block has some free items: use the first one.
3823  if(block.FirstFreeIndex != UINT32_MAX)
3824  {
3825  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3826  block.FirstFreeIndex = pItem->NextFreeIndex;
3827  return &pItem->Value;
3828  }
3829  }
3830 
3831  // No block has a free item: create a new one and use it.
3832  ItemBlock& newBlock = CreateNewBlock();
3833  Item* const pItem = &newBlock.pItems[0];
3834  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3835  return &pItem->Value;
3836 }
3837 
3838 template<typename T>
3839 void VmaPoolAllocator<T>::Free(T* ptr)
3840 {
3841  // Search all memory blocks to find ptr.
3842  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3843  {
3844  ItemBlock& block = m_ItemBlocks[i];
3845 
3846  // Casting to union.
3847  Item* pItemPtr;
3848  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3849 
3850  // Check if pItemPtr is in address range of this block.
3851  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3852  {
3853  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3854  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3855  block.FirstFreeIndex = index;
3856  return;
3857  }
3858  }
3859  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3860 }
3861 
3862 template<typename T>
3863 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3864 {
3865  ItemBlock newBlock = {
3866  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3867 
3868  m_ItemBlocks.push_back(newBlock);
3869 
3870  // Setup singly-linked list of all free items in this block.
3871  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3872  newBlock.pItems[i].NextFreeIndex = i + 1;
3873  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3874  return m_ItemBlocks.back();
3875 }
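// Usage sketch (illustrative, hypothetical item type): once a block exists,
// Alloc() and Free() only relink the intrusive free list stored inside the
// unused items themselves and never touch the system heap. Note that Alloc()
// does not run T's constructor; items are used POD-style.
//
//     struct MyItem { uint32_t a, b; }; // hypothetical POD payload
//     VmaPoolAllocator<MyItem> pool(pAllocationCallbacks, 128); // 128 items per block
//     MyItem* item = pool.Alloc();
//     pool.Free(item);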
3876 
3878 // class VmaRawList, VmaList
3879 
3880 #if VMA_USE_STL_LIST
3881 
3882 #define VmaList std::list
3883 
3884 #else // #if VMA_USE_STL_LIST
3885 
3886 template<typename T>
3887 struct VmaListItem
3888 {
3889  VmaListItem* pPrev;
3890  VmaListItem* pNext;
3891  T Value;
3892 };
3893 
3894 // Doubly linked list.
3895 template<typename T>
3896 class VmaRawList
3897 {
3898  VMA_CLASS_NO_COPY(VmaRawList)
3899 public:
3900  typedef VmaListItem<T> ItemType;
3901 
3902  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
3903  ~VmaRawList();
3904  void Clear();
3905 
3906  size_t GetCount() const { return m_Count; }
3907  bool IsEmpty() const { return m_Count == 0; }
3908 
3909  ItemType* Front() { return m_pFront; }
3910  const ItemType* Front() const { return m_pFront; }
3911  ItemType* Back() { return m_pBack; }
3912  const ItemType* Back() const { return m_pBack; }
3913 
3914  ItemType* PushBack();
3915  ItemType* PushFront();
3916  ItemType* PushBack(const T& value);
3917  ItemType* PushFront(const T& value);
3918  void PopBack();
3919  void PopFront();
3920 
3921  // Item can be null - it means PushBack.
3922  ItemType* InsertBefore(ItemType* pItem);
3923  // Item can be null - it means PushFront.
3924  ItemType* InsertAfter(ItemType* pItem);
3925 
3926  ItemType* InsertBefore(ItemType* pItem, const T& value);
3927  ItemType* InsertAfter(ItemType* pItem, const T& value);
3928 
3929  void Remove(ItemType* pItem);
3930 
3931 private:
3932  const VkAllocationCallbacks* const m_pAllocationCallbacks;
3933  VmaPoolAllocator<ItemType> m_ItemAllocator;
3934  ItemType* m_pFront;
3935  ItemType* m_pBack;
3936  size_t m_Count;
3937 };
3938 
3939 template<typename T>
3940 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
3941  m_pAllocationCallbacks(pAllocationCallbacks),
3942  m_ItemAllocator(pAllocationCallbacks, 128),
3943  m_pFront(VMA_NULL),
3944  m_pBack(VMA_NULL),
3945  m_Count(0)
3946 {
3947 }
3948 
3949 template<typename T>
3950 VmaRawList<T>::~VmaRawList()
3951 {
3952  // Intentionally not calling Clear, because that would spend unnecessary
3953  // computation returning all items to m_ItemAllocator as free.
3954 }
3955 
3956 template<typename T>
3957 void VmaRawList<T>::Clear()
3958 {
3959  if(IsEmpty() == false)
3960  {
3961  ItemType* pItem = m_pBack;
3962  while(pItem != VMA_NULL)
3963  {
3964  ItemType* const pPrevItem = pItem->pPrev;
3965  m_ItemAllocator.Free(pItem);
3966  pItem = pPrevItem;
3967  }
3968  m_pFront = VMA_NULL;
3969  m_pBack = VMA_NULL;
3970  m_Count = 0;
3971  }
3972 }
3973 
3974 template<typename T>
3975 VmaListItem<T>* VmaRawList<T>::PushBack()
3976 {
3977  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3978  pNewItem->pNext = VMA_NULL;
3979  if(IsEmpty())
3980  {
3981  pNewItem->pPrev = VMA_NULL;
3982  m_pFront = pNewItem;
3983  m_pBack = pNewItem;
3984  m_Count = 1;
3985  }
3986  else
3987  {
3988  pNewItem->pPrev = m_pBack;
3989  m_pBack->pNext = pNewItem;
3990  m_pBack = pNewItem;
3991  ++m_Count;
3992  }
3993  return pNewItem;
3994 }
3995 
3996 template<typename T>
3997 VmaListItem<T>* VmaRawList<T>::PushFront()
3998 {
3999  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4000  pNewItem->pPrev = VMA_NULL;
4001  if(IsEmpty())
4002  {
4003  pNewItem->pNext = VMA_NULL;
4004  m_pFront = pNewItem;
4005  m_pBack = pNewItem;
4006  m_Count = 1;
4007  }
4008  else
4009  {
4010  pNewItem->pNext = m_pFront;
4011  m_pFront->pPrev = pNewItem;
4012  m_pFront = pNewItem;
4013  ++m_Count;
4014  }
4015  return pNewItem;
4016 }
4017 
4018 template<typename T>
4019 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4020 {
4021  ItemType* const pNewItem = PushBack();
4022  pNewItem->Value = value;
4023  return pNewItem;
4024 }
4025 
4026 template<typename T>
4027 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4028 {
4029  ItemType* const pNewItem = PushFront();
4030  pNewItem->Value = value;
4031  return pNewItem;
4032 }
4033 
4034 template<typename T>
4035 void VmaRawList<T>::PopBack()
4036 {
4037  VMA_HEAVY_ASSERT(m_Count > 0);
4038  ItemType* const pBackItem = m_pBack;
4039  ItemType* const pPrevItem = pBackItem->pPrev;
4040  if(pPrevItem != VMA_NULL)
4041  {
4042  pPrevItem->pNext = VMA_NULL;
4043  }
4044  m_pBack = pPrevItem;
4045  m_ItemAllocator.Free(pBackItem);
4046  --m_Count;
4047 }
4048 
4049 template<typename T>
4050 void VmaRawList<T>::PopFront()
4051 {
4052  VMA_HEAVY_ASSERT(m_Count > 0);
4053  ItemType* const pFrontItem = m_pFront;
4054  ItemType* const pNextItem = pFrontItem->pNext;
4055  if(pNextItem != VMA_NULL)
4056  {
4057  pNextItem->pPrev = VMA_NULL;
4058  }
4059  m_pFront = pNextItem;
4060  m_ItemAllocator.Free(pFrontItem);
4061  --m_Count;
4062 }
4063 
4064 template<typename T>
4065 void VmaRawList<T>::Remove(ItemType* pItem)
4066 {
4067  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4068  VMA_HEAVY_ASSERT(m_Count > 0);
4069 
4070  if(pItem->pPrev != VMA_NULL)
4071  {
4072  pItem->pPrev->pNext = pItem->pNext;
4073  }
4074  else
4075  {
4076  VMA_HEAVY_ASSERT(m_pFront == pItem);
4077  m_pFront = pItem->pNext;
4078  }
4079 
4080  if(pItem->pNext != VMA_NULL)
4081  {
4082  pItem->pNext->pPrev = pItem->pPrev;
4083  }
4084  else
4085  {
4086  VMA_HEAVY_ASSERT(m_pBack == pItem);
4087  m_pBack = pItem->pPrev;
4088  }
4089 
4090  m_ItemAllocator.Free(pItem);
4091  --m_Count;
4092 }
4093 
4094 template<typename T>
4095 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4096 {
4097  if(pItem != VMA_NULL)
4098  {
4099  ItemType* const prevItem = pItem->pPrev;
4100  ItemType* const newItem = m_ItemAllocator.Alloc();
4101  newItem->pPrev = prevItem;
4102  newItem->pNext = pItem;
4103  pItem->pPrev = newItem;
4104  if(prevItem != VMA_NULL)
4105  {
4106  prevItem->pNext = newItem;
4107  }
4108  else
4109  {
4110  VMA_HEAVY_ASSERT(m_pFront == pItem);
4111  m_pFront = newItem;
4112  }
4113  ++m_Count;
4114  return newItem;
4115  }
4116  else
4117  return PushBack();
4118 }
4119 
4120 template<typename T>
4121 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4122 {
4123  if(pItem != VMA_NULL)
4124  {
4125  ItemType* const nextItem = pItem->pNext;
4126  ItemType* const newItem = m_ItemAllocator.Alloc();
4127  newItem->pNext = nextItem;
4128  newItem->pPrev = pItem;
4129  pItem->pNext = newItem;
4130  if(nextItem != VMA_NULL)
4131  {
4132  nextItem->pPrev = newItem;
4133  }
4134  else
4135  {
4136  VMA_HEAVY_ASSERT(m_pBack == pItem);
4137  m_pBack = newItem;
4138  }
4139  ++m_Count;
4140  return newItem;
4141  }
4142  else
4143  return PushFront();
4144 }
4145 
4146 template<typename T>
4147 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4148 {
4149  ItemType* const newItem = InsertBefore(pItem);
4150  newItem->Value = value;
4151  return newItem;
4152 }
4153 
4154 template<typename T>
4155 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4156 {
4157  ItemType* const newItem = InsertAfter(pItem);
4158  newItem->Value = value;
4159  return newItem;
4160 }
4161 
4162 template<typename T, typename AllocatorT>
4163 class VmaList
4164 {
4165  VMA_CLASS_NO_COPY(VmaList)
4166 public:
4167  class iterator
4168  {
4169  public:
4170  iterator() :
4171  m_pList(VMA_NULL),
4172  m_pItem(VMA_NULL)
4173  {
4174  }
4175 
4176  T& operator*() const
4177  {
4178  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4179  return m_pItem->Value;
4180  }
4181  T* operator->() const
4182  {
4183  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4184  return &m_pItem->Value;
4185  }
4186 
4187  iterator& operator++()
4188  {
4189  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4190  m_pItem = m_pItem->pNext;
4191  return *this;
4192  }
4193  iterator& operator--()
4194  {
4195  if(m_pItem != VMA_NULL)
4196  {
4197  m_pItem = m_pItem->pPrev;
4198  }
4199  else
4200  {
4201  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4202  m_pItem = m_pList->Back();
4203  }
4204  return *this;
4205  }
4206 
4207  iterator operator++(int)
4208  {
4209  iterator result = *this;
4210  ++*this;
4211  return result;
4212  }
4213  iterator operator--(int)
4214  {
4215  iterator result = *this;
4216  --*this;
4217  return result;
4218  }
4219 
4220  bool operator==(const iterator& rhs) const
4221  {
4222  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4223  return m_pItem == rhs.m_pItem;
4224  }
4225  bool operator!=(const iterator& rhs) const
4226  {
4227  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4228  return m_pItem != rhs.m_pItem;
4229  }
4230 
4231  private:
4232  VmaRawList<T>* m_pList;
4233  VmaListItem<T>* m_pItem;
4234 
4235  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4236  m_pList(pList),
4237  m_pItem(pItem)
4238  {
4239  }
4240 
4241  friend class VmaList<T, AllocatorT>;
4242  };
4243 
4244  class const_iterator
4245  {
4246  public:
4247  const_iterator() :
4248  m_pList(VMA_NULL),
4249  m_pItem(VMA_NULL)
4250  {
4251  }
4252 
4253  const_iterator(const iterator& src) :
4254  m_pList(src.m_pList),
4255  m_pItem(src.m_pItem)
4256  {
4257  }
4258 
4259  const T& operator*() const
4260  {
4261  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4262  return m_pItem->Value;
4263  }
4264  const T* operator->() const
4265  {
4266  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4267  return &m_pItem->Value;
4268  }
4269 
4270  const_iterator& operator++()
4271  {
4272  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4273  m_pItem = m_pItem->pNext;
4274  return *this;
4275  }
4276  const_iterator& operator--()
4277  {
4278  if(m_pItem != VMA_NULL)
4279  {
4280  m_pItem = m_pItem->pPrev;
4281  }
4282  else
4283  {
4284  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4285  m_pItem = m_pList->Back();
4286  }
4287  return *this;
4288  }
4289 
4290  const_iterator operator++(int)
4291  {
4292  const_iterator result = *this;
4293  ++*this;
4294  return result;
4295  }
4296  const_iterator operator--(int)
4297  {
4298  const_iterator result = *this;
4299  --*this;
4300  return result;
4301  }
4302 
4303  bool operator==(const const_iterator& rhs) const
4304  {
4305  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4306  return m_pItem == rhs.m_pItem;
4307  }
4308  bool operator!=(const const_iterator& rhs) const
4309  {
4310  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4311  return m_pItem != rhs.m_pItem;
4312  }
4313 
4314  private:
4315  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4316  m_pList(pList),
4317  m_pItem(pItem)
4318  {
4319  }
4320 
4321  const VmaRawList<T>* m_pList;
4322  const VmaListItem<T>* m_pItem;
4323 
4324  friend class VmaList<T, AllocatorT>;
4325  };
4326 
4327  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4328 
4329  bool empty() const { return m_RawList.IsEmpty(); }
4330  size_t size() const { return m_RawList.GetCount(); }
4331 
4332  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4333  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4334 
4335  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4336  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4337 
4338  void clear() { m_RawList.Clear(); }
4339  void push_back(const T& value) { m_RawList.PushBack(value); }
4340  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4341  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4342 
4343 private:
4344  VmaRawList<T> m_RawList;
4345 };
4346 
4347 #endif // #if VMA_USE_STL_LIST
4348 
4350 // class VmaMap
4351 
4352 // Unused in this version.
4353 #if 0
4354 
4355 #if VMA_USE_STL_UNORDERED_MAP
4356 
4357 #define VmaPair std::pair
4358 
4359 #define VMA_MAP_TYPE(KeyT, ValueT) \
4360  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4361 
4362 #else // #if VMA_USE_STL_UNORDERED_MAP
4363 
4364 template<typename T1, typename T2>
4365 struct VmaPair
4366 {
4367  T1 first;
4368  T2 second;
4369 
4370  VmaPair() : first(), second() { }
4371  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4372 };
4373 
4374 /* Class compatible with subset of interface of std::unordered_map.
4375 KeyT, ValueT must be POD because they will be stored in VmaVector.
4376 */
4377 template<typename KeyT, typename ValueT>
4378 class VmaMap
4379 {
4380 public:
4381  typedef VmaPair<KeyT, ValueT> PairType;
4382  typedef PairType* iterator;
4383 
4384  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4385 
4386  iterator begin() { return m_Vector.begin(); }
4387  iterator end() { return m_Vector.end(); }
4388 
4389  void insert(const PairType& pair);
4390  iterator find(const KeyT& key);
4391  void erase(iterator it);
4392 
4393 private:
4394  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4395 };
4396 
4397 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4398 
4399 template<typename FirstT, typename SecondT>
4400 struct VmaPairFirstLess
4401 {
4402  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4403  {
4404  return lhs.first < rhs.first;
4405  }
4406  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4407  {
4408  return lhs.first < rhsFirst;
4409  }
4410 };
4411 
4412 template<typename KeyT, typename ValueT>
4413 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4414 {
4415  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4416  m_Vector.data(),
4417  m_Vector.data() + m_Vector.size(),
4418  pair,
4419  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4420  VmaVectorInsert(m_Vector, indexToInsert, pair);
4421 }
4422 
4423 template<typename KeyT, typename ValueT>
4424 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4425 {
4426  PairType* it = VmaBinaryFindFirstNotLess(
4427  m_Vector.data(),
4428  m_Vector.data() + m_Vector.size(),
4429  key,
4430  VmaPairFirstLess<KeyT, ValueT>());
4431  if((it != m_Vector.end()) && (it->first == key))
4432  {
4433  return it;
4434  }
4435  else
4436  {
4437  return m_Vector.end();
4438  }
4439 }
4440 
4441 template<typename KeyT, typename ValueT>
4442 void VmaMap<KeyT, ValueT>::erase(iterator it)
4443 {
4444  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4445 }
4446 
4447 #endif // #if VMA_USE_STL_UNORDERED_MAP
4448 
4449 #endif // #if 0
4450 
4452 
4453 class VmaDeviceMemoryBlock;
4454 
4455 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4456 
4457 struct VmaAllocation_T
4458 {
4459  VMA_CLASS_NO_COPY(VmaAllocation_T)
4460 private:
4461  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4462 
4463  enum FLAGS
4464  {
4465  FLAG_USER_DATA_STRING = 0x01,
4466  };
4467 
4468 public:
4469  enum ALLOCATION_TYPE
4470  {
4471  ALLOCATION_TYPE_NONE,
4472  ALLOCATION_TYPE_BLOCK,
4473  ALLOCATION_TYPE_DEDICATED,
4474  };
4475 
4476  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4477  m_Alignment(1),
4478  m_Size(0),
4479  m_pUserData(VMA_NULL),
4480  m_LastUseFrameIndex(currentFrameIndex),
4481  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4482  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4483  m_MapCount(0),
4484  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4485  {
4486 #if VMA_STATS_STRING_ENABLED
4487  m_CreationFrameIndex = currentFrameIndex;
4488  m_BufferImageUsage = 0;
4489 #endif
4490  }
4491 
4492  ~VmaAllocation_T()
4493  {
4494  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4495 
4496  // Check if owned string was freed.
4497  VMA_ASSERT(m_pUserData == VMA_NULL);
4498  }
4499 
4500  void InitBlockAllocation(
4501  VmaPool hPool,
4502  VmaDeviceMemoryBlock* block,
4503  VkDeviceSize offset,
4504  VkDeviceSize alignment,
4505  VkDeviceSize size,
4506  VmaSuballocationType suballocationType,
4507  bool mapped,
4508  bool canBecomeLost)
4509  {
4510  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4511  VMA_ASSERT(block != VMA_NULL);
4512  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4513  m_Alignment = alignment;
4514  m_Size = size;
4515  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4516  m_SuballocationType = (uint8_t)suballocationType;
4517  m_BlockAllocation.m_hPool = hPool;
4518  m_BlockAllocation.m_Block = block;
4519  m_BlockAllocation.m_Offset = offset;
4520  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4521  }
4522 
4523  void InitLost()
4524  {
4525  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4526  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4527  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4528  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4529  m_BlockAllocation.m_Block = VMA_NULL;
4530  m_BlockAllocation.m_Offset = 0;
4531  m_BlockAllocation.m_CanBecomeLost = true;
4532  }
4533 
4534  void ChangeBlockAllocation(
4535  VmaAllocator hAllocator,
4536  VmaDeviceMemoryBlock* block,
4537  VkDeviceSize offset);
4538 
4539  void ChangeSize(VkDeviceSize newSize);
4540 
4541  // A non-null pMappedData means the allocation was created with the MAPPED flag.
4542  void InitDedicatedAllocation(
4543  uint32_t memoryTypeIndex,
4544  VkDeviceMemory hMemory,
4545  VmaSuballocationType suballocationType,
4546  void* pMappedData,
4547  VkDeviceSize size)
4548  {
4549  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4550  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4551  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4552  m_Alignment = 0;
4553  m_Size = size;
4554  m_SuballocationType = (uint8_t)suballocationType;
4555  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4556  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4557  m_DedicatedAllocation.m_hMemory = hMemory;
4558  m_DedicatedAllocation.m_pMappedData = pMappedData;
4559  }
4560 
4561  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4562  VkDeviceSize GetAlignment() const { return m_Alignment; }
4563  VkDeviceSize GetSize() const { return m_Size; }
4564  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4565  void* GetUserData() const { return m_pUserData; }
4566  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4567  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4568 
4569  VmaDeviceMemoryBlock* GetBlock() const
4570  {
4571  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4572  return m_BlockAllocation.m_Block;
4573  }
4574  VkDeviceSize GetOffset() const;
4575  VkDeviceMemory GetMemory() const;
4576  uint32_t GetMemoryTypeIndex() const;
4577  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4578  void* GetMappedData() const;
4579  bool CanBecomeLost() const;
4580  VmaPool GetPool() const;
4581 
4582  uint32_t GetLastUseFrameIndex() const
4583  {
4584  return m_LastUseFrameIndex.load();
4585  }
4586  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4587  {
4588  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4589  }
4590  /*
4591  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4592  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4593  - Else, returns false.
4594 
4595  If hAllocation is already lost, assert - you should not call it then.
4596  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
4597  */
4598  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4599 
4600  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4601  {
4602  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4603  outInfo.blockCount = 1;
4604  outInfo.allocationCount = 1;
4605  outInfo.unusedRangeCount = 0;
4606  outInfo.usedBytes = m_Size;
4607  outInfo.unusedBytes = 0;
4608  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4609  outInfo.unusedRangeSizeMin = UINT64_MAX;
4610  outInfo.unusedRangeSizeMax = 0;
4611  }
4612 
4613  void BlockAllocMap();
4614  void BlockAllocUnmap();
4615  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4616  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4617 
4618 #if VMA_STATS_STRING_ENABLED
4619  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4620  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4621 
4622  void InitBufferImageUsage(uint32_t bufferImageUsage)
4623  {
4624  VMA_ASSERT(m_BufferImageUsage == 0);
4625  m_BufferImageUsage = bufferImageUsage;
4626  }
4627 
4628  void PrintParameters(class VmaJsonWriter& json) const;
4629 #endif
4630 
4631 private:
4632  VkDeviceSize m_Alignment;
4633  VkDeviceSize m_Size;
4634  void* m_pUserData;
4635  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4636  uint8_t m_Type; // ALLOCATION_TYPE
4637  uint8_t m_SuballocationType; // VmaSuballocationType
4638  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4639  // Bits with mask 0x7F are a reference counter for vmaMapMemory()/vmaUnmapMemory().
4640  uint8_t m_MapCount;
4641  uint8_t m_Flags; // enum FLAGS
4642 
4643  // Allocation out of VmaDeviceMemoryBlock.
4644  struct BlockAllocation
4645  {
4646  VmaPool m_hPool; // Null if belongs to general memory.
4647  VmaDeviceMemoryBlock* m_Block;
4648  VkDeviceSize m_Offset;
4649  bool m_CanBecomeLost;
4650  };
4651 
4652  // Allocation for an object that has its own private VkDeviceMemory.
4653  struct DedicatedAllocation
4654  {
4655  uint32_t m_MemoryTypeIndex;
4656  VkDeviceMemory m_hMemory;
4657  void* m_pMappedData; // Not null means memory is mapped.
4658  };
4659 
4660  union
4661  {
4662  // Allocation out of VmaDeviceMemoryBlock.
4663  BlockAllocation m_BlockAllocation;
4664  // Allocation for an object that has its own private VkDeviceMemory.
4665  DedicatedAllocation m_DedicatedAllocation;
4666  };
4667 
4668 #if VMA_STATS_STRING_ENABLED
4669  uint32_t m_CreationFrameIndex;
4670  uint32_t m_BufferImageUsage; // 0 if unknown.
4671 #endif
4672 
4673  void FreeUserDataString(VmaAllocator hAllocator);
4674 };
4675 
4676 /*
4677 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
4678 an allocated memory block, or free.
4679 */
4680 struct VmaSuballocation
4681 {
4682  VkDeviceSize offset;
4683  VkDeviceSize size;
4684  VmaAllocation hAllocation;
4685  VmaSuballocationType type;
4686 };
4687 
4688 // Comparator for offsets.
4689 struct VmaSuballocationOffsetLess
4690 {
4691  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4692  {
4693  return lhs.offset < rhs.offset;
4694  }
4695 };
4696 struct VmaSuballocationOffsetGreater
4697 {
4698  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4699  {
4700  return lhs.offset > rhs.offset;
4701  }
4702 };
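
// A minimal sketch (assuming std::vector and <algorithm> in place of VmaVector,
// for brevity) of what these comparators enable: with the collection kept
// sorted by offset, a new suballocation can be placed by binary search.
#if 0
#include <algorithm>
#include <vector>

static void InsertSortedByOffset(
    std::vector<VmaSuballocation>& suballocs,
    const VmaSuballocation& newSuballoc)
{
    // First position whose offset is not less than newSuballoc.offset -
    // inserting there keeps the vector sorted ascending by offset.
    const auto insertPos = std::lower_bound(
        suballocs.begin(), suballocs.end(), newSuballoc,
        VmaSuballocationOffsetLess());
    suballocs.insert(insertPos, newSuballoc);
}
#endif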
4703 
4704 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4705 
4706 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
4707 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4708 
4709 /*
4710 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4711 
4712 If canMakeOtherLost was false:
4713 - item points to a FREE suballocation.
4714 - itemsToMakeLostCount is 0.
4715 
4716 If canMakeOtherLost was true:
4717 - item points to the first of a sequence of suballocations, each of which is
4718  either FREE or points to a VmaAllocation that can become lost.
4719 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4720  the requested allocation to succeed.
4721 */
4722 struct VmaAllocationRequest
4723 {
4724  VkDeviceSize offset;
4725  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4726  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4727  VmaSuballocationList::iterator item;
4728  size_t itemsToMakeLostCount;
4729  void* customData;
4730 
4731  VkDeviceSize CalcCost() const
4732  {
4733  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4734  }
4735 };
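
// A minimal sketch of how CalcCost() ranks candidate requests: each allocation
// that would be made lost is charged VMA_LOST_ALLOCATION_COST (1 MiB) on top of
// the bytes it occupies, so a zero-casualty request always wins. Values below
// are made up for illustration.
#if 0
VmaAllocationRequest reqA = {};
reqA.sumItemSize = 512 * 1024;   // 512 KiB of live data would be lost...
reqA.itemsToMakeLostCount = 2;   // ...across 2 allocations.
VmaAllocationRequest reqB = {};  // Fits entirely in a free range.
// reqA.CalcCost() == 512 KiB + 2 * 1 MiB = 2.5 MiB; reqB.CalcCost() == 0.
const bool preferB = reqB.CalcCost() < reqA.CalcCost(); // true
#endif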
4736 
4737 /*
4738 Data structure used for bookkeeping of allocations and unused ranges of memory
4739 in a single VkDeviceMemory block.
4740 */
4741 class VmaBlockMetadata
4742 {
4743 public:
4744  VmaBlockMetadata(VmaAllocator hAllocator);
4745  virtual ~VmaBlockMetadata() { }
4746  virtual void Init(VkDeviceSize size) { m_Size = size; }
4747 
4748  // Validates all data structures inside this object. If not valid, returns false.
4749  virtual bool Validate() const = 0;
4750  VkDeviceSize GetSize() const { return m_Size; }
4751  virtual size_t GetAllocationCount() const = 0;
4752  virtual VkDeviceSize GetSumFreeSize() const = 0;
4753  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
4754  // Returns true if this block is empty - contains only a single free suballocation.
4755  virtual bool IsEmpty() const = 0;
4756 
4757  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
4758  // Shouldn't modify blockCount.
4759  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
4760 
4761 #if VMA_STATS_STRING_ENABLED
4762  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
4763 #endif
4764 
4765  // Tries to find a place for suballocation with given parameters inside this block.
4766  // If succeeded, fills pAllocationRequest and returns true.
4767  // If failed, returns false.
4768  virtual bool CreateAllocationRequest(
4769  uint32_t currentFrameIndex,
4770  uint32_t frameInUseCount,
4771  VkDeviceSize bufferImageGranularity,
4772  VkDeviceSize allocSize,
4773  VkDeviceSize allocAlignment,
4774  bool upperAddress,
4775  VmaSuballocationType allocType,
4776  bool canMakeOtherLost,
4777  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
4778  VmaAllocationRequest* pAllocationRequest) = 0;
4779 
4780  virtual bool MakeRequestedAllocationsLost(
4781  uint32_t currentFrameIndex,
4782  uint32_t frameInUseCount,
4783  VmaAllocationRequest* pAllocationRequest) = 0;
4784 
4785  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
4786 
4787  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
4788 
4789  // Makes actual allocation based on request. Request must already be checked and valid.
4790  virtual void Alloc(
4791  const VmaAllocationRequest& request,
4792  VmaSuballocationType type,
4793  VkDeviceSize allocSize,
4794  bool upperAddress,
4795  VmaAllocation hAllocation) = 0;
4796 
4797  // Frees suballocation assigned to given memory region.
4798  virtual void Free(const VmaAllocation allocation) = 0;
4799  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
4800 
4801  // Tries to resize (grow or shrink) space for given allocation, in place.
4802  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
4803 
4804 protected:
4805  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
4806 
4807 #if VMA_STATS_STRING_ENABLED
4808  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
4809  VkDeviceSize unusedBytes,
4810  size_t allocationCount,
4811  size_t unusedRangeCount) const;
4812  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
4813  VkDeviceSize offset,
4814  VmaAllocation hAllocation) const;
4815  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
4816  VkDeviceSize offset,
4817  VkDeviceSize size) const;
4818  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
4819 #endif
4820 
4821 private:
4822  VkDeviceSize m_Size;
4823  const VkAllocationCallbacks* m_pAllocationCallbacks;
4824 };
4825 
4826 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
4827  VMA_ASSERT(0 && "Validation failed: " #cond); \
4828  return false; \
4829  } } while(false)
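
// A minimal sketch (hypothetical checks, for illustration) of how VMA_VALIDATE
// is meant to be used: inside a Validate()-style function it asserts in debug
// builds and returns false on the first broken invariant, so a long series of
// checks reads as a flat list.
#if 0
static bool ValidateExample(VkDeviceSize size, VkDeviceSize sumFreeSize, size_t allocCount)
{
    VMA_VALIDATE(sumFreeSize <= size);                   // Free bytes cannot exceed block size.
    VMA_VALIDATE(allocCount > 0 || sumFreeSize == size); // No allocations => all free.
    return true; // All invariants held.
}
#endif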
4830 
4831 class VmaBlockMetadata_Generic : public VmaBlockMetadata
4832 {
4833  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
4834 public:
4835  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
4836  virtual ~VmaBlockMetadata_Generic();
4837  virtual void Init(VkDeviceSize size);
4838 
4839  virtual bool Validate() const;
4840  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4841  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4842  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4843  virtual bool IsEmpty() const;
4844 
4845  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4846  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4847 
4848 #if VMA_STATS_STRING_ENABLED
4849  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4850 #endif
4851 
4852  virtual bool CreateAllocationRequest(
4853  uint32_t currentFrameIndex,
4854  uint32_t frameInUseCount,
4855  VkDeviceSize bufferImageGranularity,
4856  VkDeviceSize allocSize,
4857  VkDeviceSize allocAlignment,
4858  bool upperAddress,
4859  VmaSuballocationType allocType,
4860  bool canMakeOtherLost,
4861  uint32_t strategy,
4862  VmaAllocationRequest* pAllocationRequest);
4863 
4864  virtual bool MakeRequestedAllocationsLost(
4865  uint32_t currentFrameIndex,
4866  uint32_t frameInUseCount,
4867  VmaAllocationRequest* pAllocationRequest);
4868 
4869  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4870 
4871  virtual VkResult CheckCorruption(const void* pBlockData);
4872 
4873  virtual void Alloc(
4874  const VmaAllocationRequest& request,
4875  VmaSuballocationType type,
4876  VkDeviceSize allocSize,
4877  bool upperAddress,
4878  VmaAllocation hAllocation);
4879 
4880  virtual void Free(const VmaAllocation allocation);
4881  virtual void FreeAtOffset(VkDeviceSize offset);
4882 
4883  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
4884 
4885 private:
4886  uint32_t m_FreeCount;
4887  VkDeviceSize m_SumFreeSize;
4888  VmaSuballocationList m_Suballocations;
4889  // Suballocations that are free and have size greater than a certain threshold.
4890  // Sorted by size, ascending.
4891  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4892 
4893  bool ValidateFreeSuballocationList() const;
4894 
4895  // Checks if a requested suballocation with given parameters can be placed at given suballocItem.
4896  // If yes, fills pOffset and returns true. If no, returns false.
4897  bool CheckAllocation(
4898  uint32_t currentFrameIndex,
4899  uint32_t frameInUseCount,
4900  VkDeviceSize bufferImageGranularity,
4901  VkDeviceSize allocSize,
4902  VkDeviceSize allocAlignment,
4903  VmaSuballocationType allocType,
4904  VmaSuballocationList::const_iterator suballocItem,
4905  bool canMakeOtherLost,
4906  VkDeviceSize* pOffset,
4907  size_t* itemsToMakeLostCount,
4908  VkDeviceSize* pSumFreeSize,
4909  VkDeviceSize* pSumItemSize) const;
4910  // Given a free suballocation, merges it with the following one, which must also be free.
4911  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4912  // Releases given suballocation, making it free.
4913  // Merges it with adjacent free suballocations if applicable.
4914  // Returns iterator to new free suballocation at this place.
4915  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4916  // Given a free suballocation, inserts it into the sorted list
4917  // m_FreeSuballocationsBySize if it is suitable (large enough).
4918  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4919  // Given a free suballocation, removes it from the sorted list
4920  // m_FreeSuballocationsBySize if it is suitable (large enough).
4921  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4922 };
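
// A minimal sketch (plain sizes in std::vector instead of the iterator vector,
// for brevity) of why m_FreeSuballocationsBySize is kept sorted ascending:
// a best-fit search reduces to one lower_bound for the smallest range that fits.
#if 0
#include <algorithm>
#include <vector>

static bool FindBestFit(
    const std::vector<VkDeviceSize>& freeSizesAscending,
    VkDeviceSize allocSize,
    size_t& outIndex)
{
    const auto it = std::lower_bound(
        freeSizesAscending.begin(), freeSizesAscending.end(), allocSize);
    if(it == freeSizesAscending.end())
        return false; // No free range is large enough.
    outIndex = size_t(it - freeSizesAscending.begin());
    return true;
}
#endif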
4923 
4924 /*
4925 Allocations and their references in the internal data structure look like this:
4926 
4927 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4928 
4929  0 +-------+
4930  | |
4931  | |
4932  | |
4933  +-------+
4934  | Alloc | 1st[m_1stNullItemsBeginCount]
4935  +-------+
4936  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4937  +-------+
4938  | ... |
4939  +-------+
4940  | Alloc | 1st[1st.size() - 1]
4941  +-------+
4942  | |
4943  | |
4944  | |
4945 GetSize() +-------+
4946 
4947 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4948 
4949  0 +-------+
4950  | Alloc | 2nd[0]
4951  +-------+
4952  | Alloc | 2nd[1]
4953  +-------+
4954  | ... |
4955  +-------+
4956  | Alloc | 2nd[2nd.size() - 1]
4957  +-------+
4958  | |
4959  | |
4960  | |
4961  +-------+
4962  | Alloc | 1st[m_1stNullItemsBeginCount]
4963  +-------+
4964  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4965  +-------+
4966  | ... |
4967  +-------+
4968  | Alloc | 1st[1st.size() - 1]
4969  +-------+
4970  | |
4971 GetSize() +-------+
4972 
4973 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4974 
4975  0 +-------+
4976  | |
4977  | |
4978  | |
4979  +-------+
4980  | Alloc | 1st[m_1stNullItemsBeginCount]
4981  +-------+
4982  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4983  +-------+
4984  | ... |
4985  +-------+
4986  | Alloc | 1st[1st.size() - 1]
4987  +-------+
4988  | |
4989  | |
4990  | |
4991  +-------+
4992  | Alloc | 2nd[2nd.size() - 1]
4993  +-------+
4994  | ... |
4995  +-------+
4996  | Alloc | 2nd[1]
4997  +-------+
4998  | Alloc | 2nd[0]
4999 GetSize() +-------+
5000 
5001 */
5002 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5003 {
5004  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5005 public:
5006  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5007  virtual ~VmaBlockMetadata_Linear();
5008  virtual void Init(VkDeviceSize size);
5009 
5010  virtual bool Validate() const;
5011  virtual size_t GetAllocationCount() const;
5012  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5013  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5014  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5015 
5016  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5017  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5018 
5019 #if VMA_STATS_STRING_ENABLED
5020  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5021 #endif
5022 
5023  virtual bool CreateAllocationRequest(
5024  uint32_t currentFrameIndex,
5025  uint32_t frameInUseCount,
5026  VkDeviceSize bufferImageGranularity,
5027  VkDeviceSize allocSize,
5028  VkDeviceSize allocAlignment,
5029  bool upperAddress,
5030  VmaSuballocationType allocType,
5031  bool canMakeOtherLost,
5032  uint32_t strategy,
5033  VmaAllocationRequest* pAllocationRequest);
5034 
5035  virtual bool MakeRequestedAllocationsLost(
5036  uint32_t currentFrameIndex,
5037  uint32_t frameInUseCount,
5038  VmaAllocationRequest* pAllocationRequest);
5039 
5040  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5041 
5042  virtual VkResult CheckCorruption(const void* pBlockData);
5043 
5044  virtual void Alloc(
5045  const VmaAllocationRequest& request,
5046  VmaSuballocationType type,
5047  VkDeviceSize allocSize,
5048  bool upperAddress,
5049  VmaAllocation hAllocation);
5050 
5051  virtual void Free(const VmaAllocation allocation);
5052  virtual void FreeAtOffset(VkDeviceSize offset);
5053 
5054 private:
5055  /*
5056  There are two suballocation vectors, used in ping-pong way.
5057  The one with index m_1stVectorIndex is called 1st.
5058  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5059  2nd can be non-empty only when 1st is not empty.
5060  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5061  */
5062  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5063 
5064  enum SECOND_VECTOR_MODE
5065  {
5066  SECOND_VECTOR_EMPTY,
5067  /*
5068  Suballocations in 2nd vector are created later than the ones in 1st, but they
5069  all have smaller offsets.
5070  */
5071  SECOND_VECTOR_RING_BUFFER,
5072  /*
5073  Suballocations in 2nd vector are upper side of double stack.
5074  They all have offsets higher than those in 1st vector.
5075  Top of this stack means smaller offsets, but higher indices in this vector.
5076  */
5077  SECOND_VECTOR_DOUBLE_STACK,
5078  };
5079 
5080  VkDeviceSize m_SumFreeSize;
5081  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5082  uint32_t m_1stVectorIndex;
5083  SECOND_VECTOR_MODE m_2ndVectorMode;
5084 
5085  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5086  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5087  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5088  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5089 
5090  // Number of items in 1st vector with hAllocation = null at the beginning.
5091  size_t m_1stNullItemsBeginCount;
5092  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5093  size_t m_1stNullItemsMiddleCount;
5094  // Number of items in 2nd vector with hAllocation = null.
5095  size_t m_2ndNullItemsCount;
5096 
5097  bool ShouldCompact1st() const;
5098  void CleanupAfterFree();
5099 };
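
// A minimal sketch (hypothetical helper, names made up) of the ring-buffer
// mode described above: the wrapped-around tail in 2nd may only grow until it
// would collide with the oldest still-alive allocation at the head of 1st.
#if 0
static bool RingBufferWrapFits(
    VkDeviceSize secondTailEnd,   // End offset of the newest item in 2nd.
    VkDeviceSize firstHeadOffset, // Offset of the oldest item in 1st.
    VkDeviceSize allocSize)
{
    return secondTailEnd + allocSize <= firstHeadOffset;
}
#endif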
5100 
5101 /*
5102 - GetSize() is the original size of allocated memory block.
5103 - m_UsableSize is this size aligned down to a power of two.
5104  All allocations and calculations happen relative to m_UsableSize.
5105 - GetUnusableSize() is the difference between them.
5106  It is reported as a separate, unused range, not available for allocations.
5107 
5108 Node at level 0 has size = m_UsableSize.
5109 Each subsequent level contains nodes half the size of those at the previous level.
5110 m_LevelCount is the maximum number of levels to use in the current object.
5111 */
5112 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5113 {
5114  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5115 public:
5116  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5117  virtual ~VmaBlockMetadata_Buddy();
5118  virtual void Init(VkDeviceSize size);
5119 
5120  virtual bool Validate() const;
5121  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5122  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5123  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5124  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5125 
5126  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5127  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5128 
5129 #if VMA_STATS_STRING_ENABLED
5130  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5131 #endif
5132 
5133  virtual bool CreateAllocationRequest(
5134  uint32_t currentFrameIndex,
5135  uint32_t frameInUseCount,
5136  VkDeviceSize bufferImageGranularity,
5137  VkDeviceSize allocSize,
5138  VkDeviceSize allocAlignment,
5139  bool upperAddress,
5140  VmaSuballocationType allocType,
5141  bool canMakeOtherLost,
5142  uint32_t strategy,
5143  VmaAllocationRequest* pAllocationRequest);
5144 
5145  virtual bool MakeRequestedAllocationsLost(
5146  uint32_t currentFrameIndex,
5147  uint32_t frameInUseCount,
5148  VmaAllocationRequest* pAllocationRequest);
5149 
5150  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5151 
5152  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5153 
5154  virtual void Alloc(
5155  const VmaAllocationRequest& request,
5156  VmaSuballocationType type,
5157  VkDeviceSize allocSize,
5158  bool upperAddress,
5159  VmaAllocation hAllocation);
5160 
5161  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5162  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5163 
5164 private:
5165  static const VkDeviceSize MIN_NODE_SIZE = 32;
5166  static const size_t MAX_LEVELS = 30;
5167 
5168  struct ValidationContext
5169  {
5170  size_t calculatedAllocationCount;
5171  size_t calculatedFreeCount;
5172  VkDeviceSize calculatedSumFreeSize;
5173 
5174  ValidationContext() :
5175  calculatedAllocationCount(0),
5176  calculatedFreeCount(0),
5177  calculatedSumFreeSize(0) { }
5178  };
5179 
5180  struct Node
5181  {
5182  VkDeviceSize offset;
5183  enum TYPE
5184  {
5185  TYPE_FREE,
5186  TYPE_ALLOCATION,
5187  TYPE_SPLIT,
5188  TYPE_COUNT
5189  } type;
5190  Node* parent;
5191  Node* buddy;
5192 
5193  union
5194  {
5195  struct
5196  {
5197  Node* prev;
5198  Node* next;
5199  } free;
5200  struct
5201  {
5202  VmaAllocation alloc;
5203  } allocation;
5204  struct
5205  {
5206  Node* leftChild;
5207  } split;
5208  };
5209  };
5210 
5211  // Size of the memory block aligned down to a power of two.
5212  VkDeviceSize m_UsableSize;
5213  uint32_t m_LevelCount;
5214 
5215  Node* m_Root;
5216  struct {
5217  Node* front;
5218  Node* back;
5219  } m_FreeList[MAX_LEVELS];
5220  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5221  size_t m_AllocationCount;
5222  // Number of nodes in the tree with type == TYPE_FREE.
5223  size_t m_FreeCount;
5224  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5225  VkDeviceSize m_SumFreeSize;
5226 
5227  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5228  void DeleteNode(Node* node);
5229  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5230  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5231  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5232  // Alloc passed just for validation. Can be null.
5233  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5234  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5235  // Adds node to the front of FreeList at given level.
5236  // node->type must be FREE.
5237  // node->free.prev, next can be undefined.
5238  void AddToFreeListFront(uint32_t level, Node* node);
5239  // Removes node from FreeList at given level.
5240  // node->type must be FREE.
5241  // node->free.prev, next stay untouched.
5242  void RemoveFromFreeList(uint32_t level, Node* node);
5243 
5244 #if VMA_STATS_STRING_ENABLED
5245  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5246 #endif
5247 };
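
// A minimal sketch (hypothetical standalone function) of the level arithmetic
// described above: node size halves per level, so with m_UsableSize = 256 MiB,
// level 3 holds nodes of 256 MiB >> 3 = 32 MiB. The deepest level whose node
// size still fits the request is chosen, mirroring AllocSizeToLevel().
#if 0
static uint32_t AllocSizeToLevelSketch(
    VkDeviceSize usableSize, uint32_t levelCount, VkDeviceSize allocSize)
{
    uint32_t level = 0;
    // Descend while the child node (half this level's size) still fits.
    while(level + 1 < levelCount && (usableSize >> (level + 1)) >= allocSize)
        ++level;
    return level;
}
#endif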
5248 
5249 /*
5250 Represents a single block of device memory (`VkDeviceMemory`) with all the
5251 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5252 
5253 Thread-safety: This class must be externally synchronized.
5254 */
5255 class VmaDeviceMemoryBlock
5256 {
5257  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5258 public:
5259  VmaBlockMetadata* m_pMetadata;
5260 
5261  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5262 
5263  ~VmaDeviceMemoryBlock()
5264  {
5265  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5266  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5267  }
5268 
5269  // Always call after construction.
5270  void Init(
5271  VmaAllocator hAllocator,
5272  uint32_t newMemoryTypeIndex,
5273  VkDeviceMemory newMemory,
5274  VkDeviceSize newSize,
5275  uint32_t id,
5276  uint32_t algorithm);
5277  // Always call before destruction.
5278  void Destroy(VmaAllocator allocator);
5279 
5280  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5281  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5282  uint32_t GetId() const { return m_Id; }
5283  void* GetMappedData() const { return m_pMappedData; }
5284 
5285  // Validates all data structures inside this object. If not valid, returns false.
5286  bool Validate() const;
5287 
5288  VkResult CheckCorruption(VmaAllocator hAllocator);
5289 
5290  // ppData can be null.
5291  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5292  void Unmap(VmaAllocator hAllocator, uint32_t count);
5293 
5294  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5295  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5296 
5297  VkResult BindBufferMemory(
5298  const VmaAllocator hAllocator,
5299  const VmaAllocation hAllocation,
5300  VkBuffer hBuffer);
5301  VkResult BindImageMemory(
5302  const VmaAllocator hAllocator,
5303  const VmaAllocation hAllocation,
5304  VkImage hImage);
5305 
5306 private:
5307  uint32_t m_MemoryTypeIndex;
5308  uint32_t m_Id;
5309  VkDeviceMemory m_hMemory;
5310 
5311  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5312  // Also protects m_MapCount, m_pMappedData.
5313  VMA_MUTEX m_Mutex;
5314  uint32_t m_MapCount;
5315  void* m_pMappedData;
5316 };
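
// A minimal sketch of the Map/Unmap reference counting (block and hAllocator
// assumed to be valid objects): one vkMapMemory() mapping is shared by any
// number of users and released only when the count returns to zero.
#if 0
void* pData1 = VMA_NULL;
block.Map(hAllocator, 1, &pData1); // Count 0 -> 1: calls vkMapMemory.
void* pData2 = VMA_NULL;
block.Map(hAllocator, 1, &pData2); // Count 1 -> 2: reuses m_pMappedData.
block.Unmap(hAllocator, 1);        // Count 2 -> 1: memory stays mapped.
block.Unmap(hAllocator, 1);        // Count 1 -> 0: calls vkUnmapMemory.
#endif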
5317 
5318 struct VmaPointerLess
5319 {
5320  bool operator()(const void* lhs, const void* rhs) const
5321  {
5322  return lhs < rhs;
5323  }
5324 };
5325 
5326 class VmaDefragmentator;
5327 
5328 /*
5329 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5330 Vulkan memory type.
5331 
5332 Synchronized internally with a mutex.
5333 */
5334 struct VmaBlockVector
5335 {
5336  VMA_CLASS_NO_COPY(VmaBlockVector)
5337 public:
5338  VmaBlockVector(
5339  VmaAllocator hAllocator,
5340  uint32_t memoryTypeIndex,
5341  VkDeviceSize preferredBlockSize,
5342  size_t minBlockCount,
5343  size_t maxBlockCount,
5344  VkDeviceSize bufferImageGranularity,
5345  uint32_t frameInUseCount,
5346  bool isCustomPool,
5347  bool explicitBlockSize,
5348  uint32_t algorithm);
5349  ~VmaBlockVector();
5350 
5351  VkResult CreateMinBlocks();
5352 
5353  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5354  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5355  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5356  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5357  uint32_t GetAlgorithm() const { return m_Algorithm; }
5358 
5359  void GetPoolStats(VmaPoolStats* pStats);
5360 
5361  bool IsEmpty() const { return m_Blocks.empty(); }
5362  bool IsCorruptionDetectionEnabled() const;
5363 
5364  VkResult Allocate(
5365  VmaPool hCurrentPool,
5366  uint32_t currentFrameIndex,
5367  VkDeviceSize size,
5368  VkDeviceSize alignment,
5369  const VmaAllocationCreateInfo& createInfo,
5370  VmaSuballocationType suballocType,
5371  VmaAllocation* pAllocation);
5372 
5373  void Free(
5374  VmaAllocation hAllocation);
5375 
5376  // Adds statistics of this BlockVector to pStats.
5377  void AddStats(VmaStats* pStats);
5378 
5379 #if VMA_STATS_STRING_ENABLED
5380  void PrintDetailedMap(class VmaJsonWriter& json);
5381 #endif
5382 
5383  void MakePoolAllocationsLost(
5384  uint32_t currentFrameIndex,
5385  size_t* pLostAllocationCount);
5386  VkResult CheckCorruption();
5387 
5388  VmaDefragmentator* EnsureDefragmentator(
5389  VmaAllocator hAllocator,
5390  uint32_t currentFrameIndex);
5391 
5392  VkResult Defragment(
5393  VmaDefragmentationStats* pDefragmentationStats,
5394  VkDeviceSize& maxBytesToMove,
5395  uint32_t& maxAllocationsToMove);
5396 
5397  void DestroyDefragmentator();
5398 
5399 private:
5400  friend class VmaDefragmentator;
5401 
5402  const VmaAllocator m_hAllocator;
5403  const uint32_t m_MemoryTypeIndex;
5404  const VkDeviceSize m_PreferredBlockSize;
5405  const size_t m_MinBlockCount;
5406  const size_t m_MaxBlockCount;
5407  const VkDeviceSize m_BufferImageGranularity;
5408  const uint32_t m_FrameInUseCount;
5409  const bool m_IsCustomPool;
5410  const bool m_ExplicitBlockSize;
5411  const uint32_t m_Algorithm;
5412  bool m_HasEmptyBlock;
5413  VMA_MUTEX m_Mutex;
5414  // Incrementally sorted by sumFreeSize, ascending.
5415  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5416  /* There can be at most one block that is completely empty - a
5417  hysteresis to avoid the pessimistic case of alternating creation and destruction
5418  of a VkDeviceMemory. */
5419  VmaDefragmentator* m_pDefragmentator;
5420  uint32_t m_NextBlockId;
5421 
5422  VkDeviceSize CalcMaxBlockSize() const;
5423 
5424  // Finds and removes given block from vector.
5425  void Remove(VmaDeviceMemoryBlock* pBlock);
5426 
5427  // Performs single step in sorting m_Blocks. They may not be fully sorted
5428  // after this call.
5429  void IncrementallySortBlocks();
5430 
5431  // To be used only without CAN_MAKE_OTHER_LOST flag.
5432  VkResult AllocateFromBlock(
5433  VmaDeviceMemoryBlock* pBlock,
5434  VmaPool hCurrentPool,
5435  uint32_t currentFrameIndex,
5436  VkDeviceSize size,
5437  VkDeviceSize alignment,
5438  VmaAllocationCreateFlags allocFlags,
5439  void* pUserData,
5440  VmaSuballocationType suballocType,
5441  uint32_t strategy,
5442  VmaAllocation* pAllocation);
5443 
5444  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5445 };
5446 
5447 struct VmaPool_T
5448 {
5449  VMA_CLASS_NO_COPY(VmaPool_T)
5450 public:
5451  VmaBlockVector m_BlockVector;
5452 
5453  VmaPool_T(
5454  VmaAllocator hAllocator,
5455  const VmaPoolCreateInfo& createInfo,
5456  VkDeviceSize preferredBlockSize);
5457  ~VmaPool_T();
5458 
5459  uint32_t GetId() const { return m_Id; }
5460  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5461 
5462 #if VMA_STATS_STRING_ENABLED
5463  //void PrintDetailedMap(class VmaStringBuilder& sb);
5464 #endif
5465 
5466 private:
5467  uint32_t m_Id;
5468 };
5469 
5470 class VmaDefragmentator
5471 {
5472  VMA_CLASS_NO_COPY(VmaDefragmentator)
5473 private:
5474  const VmaAllocator m_hAllocator;
5475  VmaBlockVector* const m_pBlockVector;
5476  uint32_t m_CurrentFrameIndex;
5477  VkDeviceSize m_BytesMoved;
5478  uint32_t m_AllocationsMoved;
5479 
5480  struct AllocationInfo
5481  {
5482  VmaAllocation m_hAllocation;
5483  VkBool32* m_pChanged;
5484 
5485  AllocationInfo() :
5486  m_hAllocation(VK_NULL_HANDLE),
5487  m_pChanged(VMA_NULL)
5488  {
5489  }
5490  };
5491 
5492  struct AllocationInfoSizeGreater
5493  {
5494  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5495  {
5496  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5497  }
5498  };
5499 
5500  // Used between AddAllocation and Defragment.
5501  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5502 
5503  struct BlockInfo
5504  {
5505  VmaDeviceMemoryBlock* m_pBlock;
5506  bool m_HasNonMovableAllocations;
5507  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5508 
5509  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5510  m_pBlock(VMA_NULL),
5511  m_HasNonMovableAllocations(true),
5512  m_Allocations(pAllocationCallbacks),
5513  m_pMappedDataForDefragmentation(VMA_NULL)
5514  {
5515  }
5516 
5517  void CalcHasNonMovableAllocations()
5518  {
5519  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5520  const size_t defragmentAllocCount = m_Allocations.size();
5521  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5522  }
5523 
5524  void SortAllocationsBySizeDescecnding()
5525  {
5526  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5527  }
5528 
5529  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
5530  void Unmap(VmaAllocator hAllocator);
5531 
5532  private:
5533  // Not null if mapped for defragmentation only, not originally mapped.
5534  void* m_pMappedDataForDefragmentation;
5535  };
5536 
5537  struct BlockPointerLess
5538  {
5539  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5540  {
5541  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5542  }
5543  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5544  {
5545  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5546  }
5547  };
5548 
5549  // 1. Blocks with some non-movable allocations go first.
5550  // 2. Blocks with smaller sumFreeSize go first.
5551  struct BlockInfoCompareMoveDestination
5552  {
5553  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5554  {
5555  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5556  {
5557  return true;
5558  }
5559  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
5560  {
5561  return false;
5562  }
5563  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
5564  {
5565  return true;
5566  }
5567  return false;
5568  }
5569  };
5570 
5571  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
5572  BlockInfoVector m_Blocks;
5573 
5574  VkResult DefragmentRound(
5575  VkDeviceSize maxBytesToMove,
5576  uint32_t maxAllocationsToMove);
5577 
5578  static bool MoveMakesSense(
5579  size_t dstBlockIndex, VkDeviceSize dstOffset,
5580  size_t srcBlockIndex, VkDeviceSize srcOffset);
5581 
5582 public:
5583  VmaDefragmentator(
5584  VmaAllocator hAllocator,
5585  VmaBlockVector* pBlockVector,
5586  uint32_t currentFrameIndex);
5587 
5588  ~VmaDefragmentator();
5589 
5590  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5591  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5592 
5593  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5594 
5595  VkResult Defragment(
5596  VkDeviceSize maxBytesToMove,
5597  uint32_t maxAllocationsToMove);
5598 };
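
// A minimal sketch of the move-destination ordering defined above: sorting
// BlockInfo pointers with BlockInfoCompareMoveDestination puts blocks that
// cannot be emptied (non-movable allocations) and nearly-full blocks first,
// so moved data is packed into blocks least likely to be released.
#if 0
#include <algorithm>
// blockInfos: a BlockInfoVector as declared in the class above.
std::sort(blockInfos.begin(), blockInfos.end(), BlockInfoCompareMoveDestination());
#endif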
5599 
5600 #if VMA_RECORDING_ENABLED
5601 
5602 class VmaRecorder
5603 {
5604 public:
5605  VmaRecorder();
5606  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
5607  void WriteConfiguration(
5608  const VkPhysicalDeviceProperties& devProps,
5609  const VkPhysicalDeviceMemoryProperties& memProps,
5610  bool dedicatedAllocationExtensionEnabled);
5611  ~VmaRecorder();
5612 
5613  void RecordCreateAllocator(uint32_t frameIndex);
5614  void RecordDestroyAllocator(uint32_t frameIndex);
5615  void RecordCreatePool(uint32_t frameIndex,
5616  const VmaPoolCreateInfo& createInfo,
5617  VmaPool pool);
5618  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
5619  void RecordAllocateMemory(uint32_t frameIndex,
5620  const VkMemoryRequirements& vkMemReq,
5621  const VmaAllocationCreateInfo& createInfo,
5622  VmaAllocation allocation);
5623  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
5624  const VkMemoryRequirements& vkMemReq,
5625  bool requiresDedicatedAllocation,
5626  bool prefersDedicatedAllocation,
5627  const VmaAllocationCreateInfo& createInfo,
5628  VmaAllocation allocation);
5629  void RecordAllocateMemoryForImage(uint32_t frameIndex,
5630  const VkMemoryRequirements& vkMemReq,
5631  bool requiresDedicatedAllocation,
5632  bool prefersDedicatedAllocation,
5633  const VmaAllocationCreateInfo& createInfo,
5634  VmaAllocation allocation);
5635  void RecordFreeMemory(uint32_t frameIndex,
5636  VmaAllocation allocation);
5637  void RecordResizeAllocation(
5638  uint32_t frameIndex,
5639  VmaAllocation allocation,
5640  VkDeviceSize newSize);
5641  void RecordSetAllocationUserData(uint32_t frameIndex,
5642  VmaAllocation allocation,
5643  const void* pUserData);
5644  void RecordCreateLostAllocation(uint32_t frameIndex,
5645  VmaAllocation allocation);
5646  void RecordMapMemory(uint32_t frameIndex,
5647  VmaAllocation allocation);
5648  void RecordUnmapMemory(uint32_t frameIndex,
5649  VmaAllocation allocation);
5650  void RecordFlushAllocation(uint32_t frameIndex,
5651  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5652  void RecordInvalidateAllocation(uint32_t frameIndex,
5653  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5654  void RecordCreateBuffer(uint32_t frameIndex,
5655  const VkBufferCreateInfo& bufCreateInfo,
5656  const VmaAllocationCreateInfo& allocCreateInfo,
5657  VmaAllocation allocation);
5658  void RecordCreateImage(uint32_t frameIndex,
5659  const VkImageCreateInfo& imageCreateInfo,
5660  const VmaAllocationCreateInfo& allocCreateInfo,
5661  VmaAllocation allocation);
5662  void RecordDestroyBuffer(uint32_t frameIndex,
5663  VmaAllocation allocation);
5664  void RecordDestroyImage(uint32_t frameIndex,
5665  VmaAllocation allocation);
5666  void RecordTouchAllocation(uint32_t frameIndex,
5667  VmaAllocation allocation);
5668  void RecordGetAllocationInfo(uint32_t frameIndex,
5669  VmaAllocation allocation);
5670  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
5671  VmaPool pool);
5672 
5673 private:
5674  struct CallParams
5675  {
5676  uint32_t threadId;
5677  double time;
5678  };
5679 
5680  class UserDataString
5681  {
5682  public:
5683  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
5684  const char* GetString() const { return m_Str; }
5685 
5686  private:
5687  char m_PtrStr[17];
5688  const char* m_Str;
5689  };
5690 
5691  bool m_UseMutex;
5692  VmaRecordFlags m_Flags;
5693  FILE* m_File;
5694  VMA_MUTEX m_FileMutex;
5695  int64_t m_Freq;
5696  int64_t m_StartCounter;
5697 
5698  void GetBasicParams(CallParams& outParams);
5699  void Flush();
5700 };
5701 
5702 #endif // #if VMA_RECORDING_ENABLED
5703 
5704 // Main allocator object.
5705 struct VmaAllocator_T
5706 {
5707  VMA_CLASS_NO_COPY(VmaAllocator_T)
5708 public:
5709  bool m_UseMutex;
5710  bool m_UseKhrDedicatedAllocation;
5711  VkDevice m_hDevice;
5712  bool m_AllocationCallbacksSpecified;
5713  VkAllocationCallbacks m_AllocationCallbacks;
5714  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5715 
5716  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
5717  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5718  VMA_MUTEX m_HeapSizeLimitMutex;
5719 
5720  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5721  VkPhysicalDeviceMemoryProperties m_MemProps;
5722 
5723  // Default pools.
5724  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5725 
5726  // Each vector is sorted by memory (handle value).
5727  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5728  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5729  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5730 
5731  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5732  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5733  ~VmaAllocator_T();
5734 
5735  const VkAllocationCallbacks* GetAllocationCallbacks() const
5736  {
5737  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5738  }
5739  const VmaVulkanFunctions& GetVulkanFunctions() const
5740  {
5741  return m_VulkanFunctions;
5742  }
5743 
5744  VkDeviceSize GetBufferImageGranularity() const
5745  {
5746  return VMA_MAX(
5747  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5748  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5749  }
5750 
5751  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5752  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5753 
5754  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5755  {
5756  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5757  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5758  }
5759  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5760  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5761  {
5762  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5763  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5764  }
5765  // Minimum alignment for all allocations in specific memory type.
5766  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5767  {
5768  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5769  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5770  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5771  }
5772 
5773  bool IsIntegratedGpu() const
5774  {
5775  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5776  }
5777 
5778 #if VMA_RECORDING_ENABLED
5779  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5780 #endif
5781 
5782  void GetBufferMemoryRequirements(
5783  VkBuffer hBuffer,
5784  VkMemoryRequirements& memReq,
5785  bool& requiresDedicatedAllocation,
5786  bool& prefersDedicatedAllocation) const;
5787  void GetImageMemoryRequirements(
5788  VkImage hImage,
5789  VkMemoryRequirements& memReq,
5790  bool& requiresDedicatedAllocation,
5791  bool& prefersDedicatedAllocation) const;
5792 
5793  // Main allocation function.
5794  VkResult AllocateMemory(
5795  const VkMemoryRequirements& vkMemReq,
5796  bool requiresDedicatedAllocation,
5797  bool prefersDedicatedAllocation,
5798  VkBuffer dedicatedBuffer,
5799  VkImage dedicatedImage,
5800  const VmaAllocationCreateInfo& createInfo,
5801  VmaSuballocationType suballocType,
5802  VmaAllocation* pAllocation);
5803 
5804  // Main deallocation function.
5805  void FreeMemory(const VmaAllocation allocation);
5806 
5807  VkResult ResizeAllocation(
5808  const VmaAllocation alloc,
5809  VkDeviceSize newSize);
5810 
5811  void CalculateStats(VmaStats* pStats);
5812 
5813 #if VMA_STATS_STRING_ENABLED
5814  void PrintDetailedMap(class VmaJsonWriter& json);
5815 #endif
5816 
5817  VkResult Defragment(
5818  VmaAllocation* pAllocations,
5819  size_t allocationCount,
5820  VkBool32* pAllocationsChanged,
5821  const VmaDefragmentationInfo* pDefragmentationInfo,
5822  VmaDefragmentationStats* pDefragmentationStats);
5823 
5824  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5825  bool TouchAllocation(VmaAllocation hAllocation);
5826 
5827  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5828  void DestroyPool(VmaPool pool);
5829  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5830 
5831  void SetCurrentFrameIndex(uint32_t frameIndex);
5832  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5833 
5834  void MakePoolAllocationsLost(
5835  VmaPool hPool,
5836  size_t* pLostAllocationCount);
5837  VkResult CheckPoolCorruption(VmaPool hPool);
5838  VkResult CheckCorruption(uint32_t memoryTypeBits);
5839 
5840  void CreateLostAllocation(VmaAllocation* pAllocation);
5841 
5842  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5843  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5844 
5845  VkResult Map(VmaAllocation hAllocation, void** ppData);
5846  void Unmap(VmaAllocation hAllocation);
5847 
5848  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5849  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5850 
5851  void FlushOrInvalidateAllocation(
5852  VmaAllocation hAllocation,
5853  VkDeviceSize offset, VkDeviceSize size,
5854  VMA_CACHE_OPERATION op);
5855 
5856  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5857 
5858 private:
5859  VkDeviceSize m_PreferredLargeHeapBlockSize;
5860 
5861  VkPhysicalDevice m_PhysicalDevice;
5862  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5863 
5864  VMA_MUTEX m_PoolsMutex;
5865  // Protected by m_PoolsMutex. Sorted by pointer value.
5866  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5867  uint32_t m_NextPoolId;
5868 
5869  VmaVulkanFunctions m_VulkanFunctions;
5870 
5871 #if VMA_RECORDING_ENABLED
5872  VmaRecorder* m_pRecorder;
5873 #endif
5874 
5875  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5876 
5877  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5878 
5879  VkResult AllocateMemoryOfType(
5880  VkDeviceSize size,
5881  VkDeviceSize alignment,
5882  bool dedicatedAllocation,
5883  VkBuffer dedicatedBuffer,
5884  VkImage dedicatedImage,
5885  const VmaAllocationCreateInfo& createInfo,
5886  uint32_t memTypeIndex,
5887  VmaSuballocationType suballocType,
5888  VmaAllocation* pAllocation);
5889 
5890  // Allocates and registers new VkDeviceMemory specifically for single allocation.
5891  VkResult AllocateDedicatedMemory(
5892  VkDeviceSize size,
5893  VmaSuballocationType suballocType,
5894  uint32_t memTypeIndex,
5895  bool map,
5896  bool isUserDataString,
5897  void* pUserData,
5898  VkBuffer dedicatedBuffer,
5899  VkImage dedicatedImage,
5900  VmaAllocation* pAllocation);
5901 
5902  // Frees given allocation as dedicated memory: unregisters it and releases its VkDeviceMemory.
5903  void FreeDedicatedMemory(VmaAllocation allocation);
5904 };
5905 
5906 ////////////////////////////////////////////////////////////////////////////////
5907 // Memory allocation #2 after VmaAllocator_T definition
5908 
5909 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5910 {
5911  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5912 }
5913 
5914 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5915 {
5916  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5917 }
5918 
5919 template<typename T>
5920 static T* VmaAllocate(VmaAllocator hAllocator)
5921 {
5922  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5923 }
5924 
5925 template<typename T>
5926 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5927 {
5928  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5929 }
5930 
5931 template<typename T>
5932 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5933 {
5934  if(ptr != VMA_NULL)
5935  {
5936  ptr->~T();
5937  VmaFree(hAllocator, ptr);
5938  }
5939 }
5940 
5941 template<typename T>
5942 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5943 {
5944  if(ptr != VMA_NULL)
5945  {
5946  for(size_t i = count; i--; )
5947  ptr[i].~T();
5948  VmaFree(hAllocator, ptr);
5949  }
5950 }
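
// A minimal sketch of the intended pairing (Thing is a hypothetical type):
// VmaAllocate only reserves raw, suitably aligned memory through the user's
// allocation callbacks, so the object is constructed separately with placement
// new, and vma_delete runs the destructor before returning the memory.
#if 0
#include <new>
Thing* const pThing = new(VmaAllocate<Thing>(hAllocator)) Thing();
// ... use *pThing ...
vma_delete(hAllocator, pThing); // Calls ~Thing(), then VmaFree().
#endif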
5951 
5952 ////////////////////////////////////////////////////////////////////////////////
5953 // VmaStringBuilder
5954 
5955 #if VMA_STATS_STRING_ENABLED
5956 
5957 class VmaStringBuilder
5958 {
5959 public:
5960  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
5961  size_t GetLength() const { return m_Data.size(); }
5962  const char* GetData() const { return m_Data.data(); }
5963 
5964  void Add(char ch) { m_Data.push_back(ch); }
5965  void Add(const char* pStr);
5966  void AddNewLine() { Add('\n'); }
5967  void AddNumber(uint32_t num);
5968  void AddNumber(uint64_t num);
5969  void AddPointer(const void* ptr);
5970 
5971 private:
5972  VmaVector< char, VmaStlAllocator<char> > m_Data;
5973 };
5974 
5975 void VmaStringBuilder::Add(const char* pStr)
5976 {
5977  const size_t strLen = strlen(pStr);
5978  if(strLen > 0)
5979  {
5980  const size_t oldCount = m_Data.size();
5981  m_Data.resize(oldCount + strLen);
5982  memcpy(m_Data.data() + oldCount, pStr, strLen);
5983  }
5984 }
5985 
5986 void VmaStringBuilder::AddNumber(uint32_t num)
5987 {
5988  char buf[11];
5989  VmaUint32ToStr(buf, sizeof(buf), num);
5990  Add(buf);
5991 }
5992 
5993 void VmaStringBuilder::AddNumber(uint64_t num)
5994 {
5995  char buf[21];
5996  VmaUint64ToStr(buf, sizeof(buf), num);
5997  Add(buf);
5998 }
5999 
6000 void VmaStringBuilder::AddPointer(const void* ptr)
6001 {
6002  char buf[21];
6003  VmaPtrToStr(buf, sizeof(buf), ptr);
6004  Add(buf);
6005 }
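
// A minimal sketch of VmaStringBuilder in use: characters accumulate in a
// VmaVector<char> with no terminating null stored, so the result must be
// consumed as the sized range (GetData(), GetLength()).
#if 0
VmaStringBuilder sb(hAllocator);
sb.Add("heap ");
sb.AddNumber(7u); // uint32_t overload.
sb.AddNewLine();
// sb now holds the 7 characters "heap 7\n" - not null-terminated.
#endif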
6006 
6007 #endif // #if VMA_STATS_STRING_ENABLED
6008 
6009 ////////////////////////////////////////////////////////////////////////////////
6010 // VmaJsonWriter
6011 
6012 #if VMA_STATS_STRING_ENABLED
6013 
6014 class VmaJsonWriter
6015 {
6016  VMA_CLASS_NO_COPY(VmaJsonWriter)
6017 public:
6018  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6019  ~VmaJsonWriter();
6020 
6021  void BeginObject(bool singleLine = false);
6022  void EndObject();
6023 
6024  void BeginArray(bool singleLine = false);
6025  void EndArray();
6026 
6027  void WriteString(const char* pStr);
6028  void BeginString(const char* pStr = VMA_NULL);
6029  void ContinueString(const char* pStr);
6030  void ContinueString(uint32_t n);
6031  void ContinueString(uint64_t n);
6032  void ContinueString_Pointer(const void* ptr);
6033  void EndString(const char* pStr = VMA_NULL);
6034 
6035  void WriteNumber(uint32_t n);
6036  void WriteNumber(uint64_t n);
6037  void WriteBool(bool b);
6038  void WriteNull();
6039 
6040 private:
6041  static const char* const INDENT;
6042 
6043  enum COLLECTION_TYPE
6044  {
6045  COLLECTION_TYPE_OBJECT,
6046  COLLECTION_TYPE_ARRAY,
6047  };
6048  struct StackItem
6049  {
6050  COLLECTION_TYPE type;
6051  uint32_t valueCount;
6052  bool singleLineMode;
6053  };
6054 
6055  VmaStringBuilder& m_SB;
6056  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6057  bool m_InsideString;
6058 
6059  void BeginValue(bool isString);
6060  void WriteIndent(bool oneLess = false);
6061 };
6062 
6063 const char* const VmaJsonWriter::INDENT = " ";
6064 
6065 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6066  m_SB(sb),
6067  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6068  m_InsideString(false)
6069 {
6070 }
6071 
6072 VmaJsonWriter::~VmaJsonWriter()
6073 {
6074  VMA_ASSERT(!m_InsideString);
6075  VMA_ASSERT(m_Stack.empty());
6076 }
6077 
6078 void VmaJsonWriter::BeginObject(bool singleLine)
6079 {
6080  VMA_ASSERT(!m_InsideString);
6081 
6082  BeginValue(false);
6083  m_SB.Add('{');
6084 
6085  StackItem item;
6086  item.type = COLLECTION_TYPE_OBJECT;
6087  item.valueCount = 0;
6088  item.singleLineMode = singleLine;
6089  m_Stack.push_back(item);
6090 }
6091 
6092 void VmaJsonWriter::EndObject()
6093 {
6094  VMA_ASSERT(!m_InsideString);
6095 
6096  WriteIndent(true);
6097  m_SB.Add('}');
6098 
6099  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6100  m_Stack.pop_back();
6101 }
6102 
6103 void VmaJsonWriter::BeginArray(bool singleLine)
6104 {
6105  VMA_ASSERT(!m_InsideString);
6106 
6107  BeginValue(false);
6108  m_SB.Add('[');
6109 
6110  StackItem item;
6111  item.type = COLLECTION_TYPE_ARRAY;
6112  item.valueCount = 0;
6113  item.singleLineMode = singleLine;
6114  m_Stack.push_back(item);
6115 }
6116 
6117 void VmaJsonWriter::EndArray()
6118 {
6119  VMA_ASSERT(!m_InsideString);
6120 
6121  WriteIndent(true);
6122  m_SB.Add(']');
6123 
6124  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6125  m_Stack.pop_back();
6126 }
6127 
6128 void VmaJsonWriter::WriteString(const char* pStr)
6129 {
6130  BeginString(pStr);
6131  EndString();
6132 }
6133 
6134 void VmaJsonWriter::BeginString(const char* pStr)
6135 {
6136  VMA_ASSERT(!m_InsideString);
6137 
6138  BeginValue(true);
6139  m_SB.Add('"');
6140  m_InsideString = true;
6141  if(pStr != VMA_NULL && pStr[0] != '\0')
6142  {
6143  ContinueString(pStr);
6144  }
6145 }
6146 
6147 void VmaJsonWriter::ContinueString(const char* pStr)
6148 {
6149  VMA_ASSERT(m_InsideString);
6150 
6151  const size_t strLen = strlen(pStr);
6152  for(size_t i = 0; i < strLen; ++i)
6153  {
6154  char ch = pStr[i];
6155  if(ch == '\\')
6156  {
6157  m_SB.Add("\\\\");
6158  }
6159  else if(ch == '"')
6160  {
6161  m_SB.Add("\\\"");
6162  }
6163  else if(ch >= 32)
6164  {
6165  m_SB.Add(ch);
6166  }
6167  else switch(ch)
6168  {
6169  case '\b':
6170  m_SB.Add("\\b");
6171  break;
6172  case '\f':
6173  m_SB.Add("\\f");
6174  break;
6175  case '\n':
6176  m_SB.Add("\\n");
6177  break;
6178  case '\r':
6179  m_SB.Add("\\r");
6180  break;
6181  case '\t':
6182  m_SB.Add("\\t");
6183  break;
6184  default:
6185  VMA_ASSERT(0 && "Character not currently supported.");
6186  break;
6187  }
6188  }
6189 }
6190 
6191 void VmaJsonWriter::ContinueString(uint32_t n)
6192 {
6193  VMA_ASSERT(m_InsideString);
6194  m_SB.AddNumber(n);
6195 }
6196 
6197 void VmaJsonWriter::ContinueString(uint64_t n)
6198 {
6199  VMA_ASSERT(m_InsideString);
6200  m_SB.AddNumber(n);
6201 }
6202 
6203 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6204 {
6205  VMA_ASSERT(m_InsideString);
6206  m_SB.AddPointer(ptr);
6207 }
6208 
6209 void VmaJsonWriter::EndString(const char* pStr)
6210 {
6211  VMA_ASSERT(m_InsideString);
6212  if(pStr != VMA_NULL && pStr[0] != '\0')
6213  {
6214  ContinueString(pStr);
6215  }
6216  m_SB.Add('"');
6217  m_InsideString = false;
6218 }
6219 
6220 void VmaJsonWriter::WriteNumber(uint32_t n)
6221 {
6222  VMA_ASSERT(!m_InsideString);
6223  BeginValue(false);
6224  m_SB.AddNumber(n);
6225 }
6226 
6227 void VmaJsonWriter::WriteNumber(uint64_t n)
6228 {
6229  VMA_ASSERT(!m_InsideString);
6230  BeginValue(false);
6231  m_SB.AddNumber(n);
6232 }
6233 
6234 void VmaJsonWriter::WriteBool(bool b)
6235 {
6236  VMA_ASSERT(!m_InsideString);
6237  BeginValue(false);
6238  m_SB.Add(b ? "true" : "false");
6239 }
6240 
6241 void VmaJsonWriter::WriteNull()
6242 {
6243  VMA_ASSERT(!m_InsideString);
6244  BeginValue(false);
6245  m_SB.Add("null");
6246 }
6247 
6248 void VmaJsonWriter::BeginValue(bool isString)
6249 {
6250  if(!m_Stack.empty())
6251  {
6252  StackItem& currItem = m_Stack.back();
6253  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6254  currItem.valueCount % 2 == 0)
6255  {
6256  VMA_ASSERT(isString);
6257  }
6258 
6259  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6260  currItem.valueCount % 2 != 0)
6261  {
6262  m_SB.Add(": ");
6263  }
6264  else if(currItem.valueCount > 0)
6265  {
6266  m_SB.Add(", ");
6267  WriteIndent();
6268  }
6269  else
6270  {
6271  WriteIndent();
6272  }
6273  ++currItem.valueCount;
6274  }
6275 }
6276 
6277 void VmaJsonWriter::WriteIndent(bool oneLess)
6278 {
6279  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6280  {
6281  m_SB.AddNewLine();
6282 
6283  size_t count = m_Stack.size();
6284  if(count > 0 && oneLess)
6285  {
6286  --count;
6287  }
6288  for(size_t i = 0; i < count; ++i)
6289  {
6290  m_SB.Add(INDENT);
6291  }
6292  }
6293 }
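
// A minimal sketch of the writer's protocol: inside an object, entries
// alternate name/value (BeginValue() asserts that every even-indexed value is
// a string), so well-formed JSON falls out of the Begin/End call structure.
#if 0
VmaStringBuilder sb(hAllocator);
VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
json.BeginObject();
json.WriteString("TotalBytes");      // Name...
json.WriteNumber((uint64_t)1048576); // ...then its value.
json.WriteString("Empty");
json.WriteBool(false);
json.EndObject();
// sb now holds: { "TotalBytes": 1048576, "Empty": false } (plus indentation).
#endif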
6294 
6295 #endif // #if VMA_STATS_STRING_ENABLED
6296 
6297 ////////////////////////////////////////////////////////////////////////////////
6298 
6299 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6300 {
6301  if(IsUserDataString())
6302  {
6303  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6304 
6305  FreeUserDataString(hAllocator);
6306 
6307  if(pUserData != VMA_NULL)
6308  {
6309  const char* const newStrSrc = (char*)pUserData;
6310  const size_t newStrLen = strlen(newStrSrc);
6311  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6312  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6313  m_pUserData = newStrDst;
6314  }
6315  }
6316  else
6317  {
6318  m_pUserData = pUserData;
6319  }
6320 }
6321 
6322 void VmaAllocation_T::ChangeBlockAllocation(
6323  VmaAllocator hAllocator,
6324  VmaDeviceMemoryBlock* block,
6325  VkDeviceSize offset)
6326 {
6327  VMA_ASSERT(block != VMA_NULL);
6328  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6329 
6330  // Move mapping reference counter from old block to new block.
6331  if(block != m_BlockAllocation.m_Block)
6332  {
6333  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6334  if(IsPersistentMap())
6335  ++mapRefCount;
6336  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6337  block->Map(hAllocator, mapRefCount, VMA_NULL);
6338  }
6339 
6340  m_BlockAllocation.m_Block = block;
6341  m_BlockAllocation.m_Offset = offset;
6342 }
6343 
6344 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
6345 {
6346  VMA_ASSERT(newSize > 0);
6347  m_Size = newSize;
6348 }
6349 
6350 VkDeviceSize VmaAllocation_T::GetOffset() const
6351 {
6352  switch(m_Type)
6353  {
6354  case ALLOCATION_TYPE_BLOCK:
6355  return m_BlockAllocation.m_Offset;
6356  case ALLOCATION_TYPE_DEDICATED:
6357  return 0;
6358  default:
6359  VMA_ASSERT(0);
6360  return 0;
6361  }
6362 }
6363 
6364 VkDeviceMemory VmaAllocation_T::GetMemory() const
6365 {
6366  switch(m_Type)
6367  {
6368  case ALLOCATION_TYPE_BLOCK:
6369  return m_BlockAllocation.m_Block->GetDeviceMemory();
6370  case ALLOCATION_TYPE_DEDICATED:
6371  return m_DedicatedAllocation.m_hMemory;
6372  default:
6373  VMA_ASSERT(0);
6374  return VK_NULL_HANDLE;
6375  }
6376 }
6377 
6378 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6379 {
6380  switch(m_Type)
6381  {
6382  case ALLOCATION_TYPE_BLOCK:
6383  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6384  case ALLOCATION_TYPE_DEDICATED:
6385  return m_DedicatedAllocation.m_MemoryTypeIndex;
6386  default:
6387  VMA_ASSERT(0);
6388  return UINT32_MAX;
6389  }
6390 }
6391 
6392 void* VmaAllocation_T::GetMappedData() const
6393 {
6394  switch(m_Type)
6395  {
6396  case ALLOCATION_TYPE_BLOCK:
6397  if(m_MapCount != 0)
6398  {
6399  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6400  VMA_ASSERT(pBlockData != VMA_NULL);
6401  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6402  }
6403  else
6404  {
6405  return VMA_NULL;
6406  }
6407  break;
6408  case ALLOCATION_TYPE_DEDICATED:
6409  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6410  return m_DedicatedAllocation.m_pMappedData;
6411  default:
6412  VMA_ASSERT(0);
6413  return VMA_NULL;
6414  }
6415 }
6416 
6417 bool VmaAllocation_T::CanBecomeLost() const
6418 {
6419  switch(m_Type)
6420  {
6421  case ALLOCATION_TYPE_BLOCK:
6422  return m_BlockAllocation.m_CanBecomeLost;
6423  case ALLOCATION_TYPE_DEDICATED:
6424  return false;
6425  default:
6426  VMA_ASSERT(0);
6427  return false;
6428  }
6429 }
6430 
6431 VmaPool VmaAllocation_T::GetPool() const
6432 {
6433  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6434  return m_BlockAllocation.m_hPool;
6435 }
6436 
6437 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6438 {
6439  VMA_ASSERT(CanBecomeLost());
6440 
6441  /*
6442  Warning: This is a carefully designed algorithm.
6443  Do not modify unless you really know what you're doing :)
6444  */
6445  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
6446  for(;;)
6447  {
6448  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6449  {
6450  VMA_ASSERT(0);
6451  return false;
6452  }
6453  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6454  {
6455  return false;
6456  }
6457  else // Last use time earlier than current time.
6458  {
6459  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6460  {
6461  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6462  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
6463  return true;
6464  }
6465  }
6466  }
6467 }
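// The loop above is a standard atomic compare-exchange retry loop. A minimal
// sketch of the same pattern written against std::atomic directly (the names
// below are illustrative, not members of this header):
//
//   #include <atomic>
//   #include <cstdint>
//
//   // Returns true if the allocation could be transitioned to the LOST state.
//   bool TryMarkLost(std::atomic<uint32_t>& lastUseFrameIndex,
//       uint32_t currentFrameIndex, uint32_t frameInUseCount, uint32_t lostValue)
//   {
//       uint32_t expected = lastUseFrameIndex.load();
//       for(;;)
//       {
//           // Still (potentially) in use within the last frameInUseCount frames.
//           if(expected + frameInUseCount >= currentFrameIndex)
//               return false;
//           // Succeeds only if no other thread changed the value meanwhile;
//           // on failure, expected is reloaded and the condition re-checked.
//           if(lastUseFrameIndex.compare_exchange_weak(expected, lostValue))
//               return true;
//       }
//   }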
6468 
6469 #if VMA_STATS_STRING_ENABLED
6470 
6471 // Names correspond to values of enum VmaSuballocationType.
6472 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6473  "FREE",
6474  "UNKNOWN",
6475  "BUFFER",
6476  "IMAGE_UNKNOWN",
6477  "IMAGE_LINEAR",
6478  "IMAGE_OPTIMAL",
6479 };
6480 
6481 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6482 {
6483  json.WriteString("Type");
6484  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6485 
6486  json.WriteString("Size");
6487  json.WriteNumber(m_Size);
6488 
6489  if(m_pUserData != VMA_NULL)
6490  {
6491  json.WriteString("UserData");
6492  if(IsUserDataString())
6493  {
6494  json.WriteString((const char*)m_pUserData);
6495  }
6496  else
6497  {
6498  json.BeginString();
6499  json.ContinueString_Pointer(m_pUserData);
6500  json.EndString();
6501  }
6502  }
6503 
6504  json.WriteString("CreationFrameIndex");
6505  json.WriteNumber(m_CreationFrameIndex);
6506 
6507  json.WriteString("LastUseFrameIndex");
6508  json.WriteNumber(GetLastUseFrameIndex());
6509 
6510  if(m_BufferImageUsage != 0)
6511  {
6512  json.WriteString("Usage");
6513  json.WriteNumber(m_BufferImageUsage);
6514  }
6515 }
6516 
6517 #endif
6518 
6519 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6520 {
6521  VMA_ASSERT(IsUserDataString());
6522  if(m_pUserData != VMA_NULL)
6523  {
6524  char* const oldStr = (char*)m_pUserData;
6525  const size_t oldStrLen = strlen(oldStr);
6526  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6527  m_pUserData = VMA_NULL;
6528  }
6529 }
6530 
6531 void VmaAllocation_T::BlockAllocMap()
6532 {
6533  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6534 
6535  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6536  {
6537  ++m_MapCount;
6538  }
6539  else
6540  {
6541  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6542  }
6543 }
6544 
6545 void VmaAllocation_T::BlockAllocUnmap()
6546 {
6547  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6548 
6549  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6550  {
6551  --m_MapCount;
6552  }
6553  else
6554  {
6555  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6556  }
6557 }
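// The low 7 bits of m_MapCount form a reference counter (the high bit marks a
// persistently mapped allocation), so nested mapping is legal as long as every
// map is paired with an unmap. A sketch of the intended usage through the
// public API (allocator and alloc assumed to be valid handles):
//
//   void* pData1 = VMA_NULL;
//   void* pData2 = VMA_NULL;
//   vmaMapMemory(allocator, alloc, &pData1); // counter 0 -> 1, maps memory
//   vmaMapMemory(allocator, alloc, &pData2); // counter 1 -> 2, same pointer
//   vmaUnmapMemory(allocator, alloc);        // counter 2 -> 1
//   vmaUnmapMemory(allocator, alloc);        // counter 1 -> 0, may unmap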
6558 
6559 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6560 {
6561  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6562 
6563  if(m_MapCount != 0)
6564  {
6565  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6566  {
6567  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6568  *ppData = m_DedicatedAllocation.m_pMappedData;
6569  ++m_MapCount;
6570  return VK_SUCCESS;
6571  }
6572  else
6573  {
6574  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6575  return VK_ERROR_MEMORY_MAP_FAILED;
6576  }
6577  }
6578  else
6579  {
6580  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6581  hAllocator->m_hDevice,
6582  m_DedicatedAllocation.m_hMemory,
6583  0, // offset
6584  VK_WHOLE_SIZE,
6585  0, // flags
6586  ppData);
6587  if(result == VK_SUCCESS)
6588  {
6589  m_DedicatedAllocation.m_pMappedData = *ppData;
6590  m_MapCount = 1;
6591  }
6592  return result;
6593  }
6594 }
6595 
6596 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6597 {
6598  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6599 
6600  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6601  {
6602  --m_MapCount;
6603  if(m_MapCount == 0)
6604  {
6605  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6606  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6607  hAllocator->m_hDevice,
6608  m_DedicatedAllocation.m_hMemory);
6609  }
6610  }
6611  else
6612  {
6613  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6614  }
6615 }
6616 
6617 #if VMA_STATS_STRING_ENABLED
6618 
6619 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6620 {
6621  json.BeginObject();
6622 
6623  json.WriteString("Blocks");
6624  json.WriteNumber(stat.blockCount);
6625 
6626  json.WriteString("Allocations");
6627  json.WriteNumber(stat.allocationCount);
6628 
6629  json.WriteString("UnusedRanges");
6630  json.WriteNumber(stat.unusedRangeCount);
6631 
6632  json.WriteString("UsedBytes");
6633  json.WriteNumber(stat.usedBytes);
6634 
6635  json.WriteString("UnusedBytes");
6636  json.WriteNumber(stat.unusedBytes);
6637 
6638  if(stat.allocationCount > 1)
6639  {
6640  json.WriteString("AllocationSize");
6641  json.BeginObject(true);
6642  json.WriteString("Min");
6643  json.WriteNumber(stat.allocationSizeMin);
6644  json.WriteString("Avg");
6645  json.WriteNumber(stat.allocationSizeAvg);
6646  json.WriteString("Max");
6647  json.WriteNumber(stat.allocationSizeMax);
6648  json.EndObject();
6649  }
6650 
6651  if(stat.unusedRangeCount > 1)
6652  {
6653  json.WriteString("UnusedRangeSize");
6654  json.BeginObject(true);
6655  json.WriteString("Min");
6656  json.WriteNumber(stat.unusedRangeSizeMin);
6657  json.WriteString("Avg");
6658  json.WriteNumber(stat.unusedRangeSizeAvg);
6659  json.WriteString("Max");
6660  json.WriteNumber(stat.unusedRangeSizeMax);
6661  json.EndObject();
6662  }
6663 
6664  json.EndObject();
6665 }
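// For reference, the writer above produces JSON of the following shape for a
// single VmaStatInfo (all values below are illustrative only):
//
//   {
//     "Blocks": 1, "Allocations": 24, "UnusedRanges": 3,
//     "UsedBytes": 12582912, "UnusedBytes": 4194304,
//     "AllocationSize": { "Min": 1024, "Avg": 524288, "Max": 4194304 },
//     "UnusedRangeSize": { "Min": 65536, "Avg": 1398101, "Max": 4063232 }
//   }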
6666 
6667 #endif // #if VMA_STATS_STRING_ENABLED
6668 
6669 struct VmaSuballocationItemSizeLess
6670 {
6671  bool operator()(
6672  const VmaSuballocationList::iterator lhs,
6673  const VmaSuballocationList::iterator rhs) const
6674  {
6675  return lhs->size < rhs->size;
6676  }
6677  bool operator()(
6678  const VmaSuballocationList::iterator lhs,
6679  VkDeviceSize rhsSize) const
6680  {
6681  return lhs->size < rhsSize;
6682  }
6683 };
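// The comparator is deliberately overloaded for iterator/iterator and
// iterator/size comparisons, so a single functor serves both for keeping
// m_FreeSuballocationsBySize sorted and for lower_bound-style searches by a
// raw size. A sketch of the equivalent standard-library idiom (freeBySize and
// requiredSize are hypothetical names):
//
//   // First registered free range whose size is not less than requiredSize.
//   auto it = std::lower_bound(freeBySize.begin(), freeBySize.end(),
//       requiredSize, VmaSuballocationItemSizeLess());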
6684 
6685 
6686 ////////////////////////////////////////////////////////////////////////////////
6687 // class VmaBlockMetadata
6688 
6689 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
6690  m_Size(0),
6691  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
6692 {
6693 }
6694 
6695 #if VMA_STATS_STRING_ENABLED
6696 
6697 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6698  VkDeviceSize unusedBytes,
6699  size_t allocationCount,
6700  size_t unusedRangeCount) const
6701 {
6702  json.BeginObject();
6703 
6704  json.WriteString("TotalBytes");
6705  json.WriteNumber(GetSize());
6706 
6707  json.WriteString("UnusedBytes");
6708  json.WriteNumber(unusedBytes);
6709 
6710  json.WriteString("Allocations");
6711  json.WriteNumber((uint64_t)allocationCount);
6712 
6713  json.WriteString("UnusedRanges");
6714  json.WriteNumber((uint64_t)unusedRangeCount);
6715 
6716  json.WriteString("Suballocations");
6717  json.BeginArray();
6718 }
6719 
6720 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6721  VkDeviceSize offset,
6722  VmaAllocation hAllocation) const
6723 {
6724  json.BeginObject(true);
6725 
6726  json.WriteString("Offset");
6727  json.WriteNumber(offset);
6728 
6729  hAllocation->PrintParameters(json);
6730 
6731  json.EndObject();
6732 }
6733 
6734 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6735  VkDeviceSize offset,
6736  VkDeviceSize size) const
6737 {
6738  json.BeginObject(true);
6739 
6740  json.WriteString("Offset");
6741  json.WriteNumber(offset);
6742 
6743  json.WriteString("Type");
6744  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6745 
6746  json.WriteString("Size");
6747  json.WriteNumber(size);
6748 
6749  json.EndObject();
6750 }
6751 
6752 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6753 {
6754  json.EndArray();
6755  json.EndObject();
6756 }
6757 
6758 #endif // #if VMA_STATS_STRING_ENABLED
6759 
6760 ////////////////////////////////////////////////////////////////////////////////
6761 // class VmaBlockMetadata_Generic
6762 
6763 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6764  VmaBlockMetadata(hAllocator),
6765  m_FreeCount(0),
6766  m_SumFreeSize(0),
6767  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6768  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6769 {
6770 }
6771 
6772 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6773 {
6774 }
6775 
6776 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6777 {
6778  VmaBlockMetadata::Init(size);
6779 
6780  m_FreeCount = 1;
6781  m_SumFreeSize = size;
6782 
6783  VmaSuballocation suballoc = {};
6784  suballoc.offset = 0;
6785  suballoc.size = size;
6786  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6787  suballoc.hAllocation = VK_NULL_HANDLE;
6788 
6789  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6790  m_Suballocations.push_back(suballoc);
6791  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6792  --suballocItem;
6793  m_FreeSuballocationsBySize.push_back(suballocItem);
6794 }
6795 
6796 bool VmaBlockMetadata_Generic::Validate() const
6797 {
6798  VMA_VALIDATE(!m_Suballocations.empty());
6799 
6800  // Expected offset of new suballocation as calculated from previous ones.
6801  VkDeviceSize calculatedOffset = 0;
6802  // Expected number of free suballocations as calculated from traversing their list.
6803  uint32_t calculatedFreeCount = 0;
6804  // Expected sum size of free suballocations as calculated from traversing their list.
6805  VkDeviceSize calculatedSumFreeSize = 0;
6806  // Expected number of free suballocations that should be registered in
6807  // m_FreeSuballocationsBySize, as calculated from traversing their list.
6808  size_t freeSuballocationsToRegister = 0;
6809  // True if the previously visited suballocation was free.
6810  bool prevFree = false;
6811 
6812  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6813  suballocItem != m_Suballocations.cend();
6814  ++suballocItem)
6815  {
6816  const VmaSuballocation& subAlloc = *suballocItem;
6817 
6818  // Actual offset of this suballocation doesn't match the expected one.
6819  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6820 
6821  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6822  // Two adjacent free suballocations are invalid. They should be merged.
6823  VMA_VALIDATE(!prevFree || !currFree);
6824 
6825  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
6826 
6827  if(currFree)
6828  {
6829  calculatedSumFreeSize += subAlloc.size;
6830  ++calculatedFreeCount;
6831  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6832  {
6833  ++freeSuballocationsToRegister;
6834  }
6835 
6836  // Margin required between allocations - every free space must be at least that large.
6837  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
6838  }
6839  else
6840  {
6841  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
6842  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
6843 
6844  // Margin required between allocations - previous allocation must be free.
6845  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
6846  }
6847 
6848  calculatedOffset += subAlloc.size;
6849  prevFree = currFree;
6850  }
6851 
6852  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6853  // match the expected one.
6854  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6855 
6856  VkDeviceSize lastSize = 0;
6857  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6858  {
6859  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6860 
6861  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6862  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6863  // They must be sorted by size ascending.
6864  VMA_VALIDATE(suballocItem->size >= lastSize);
6865 
6866  lastSize = suballocItem->size;
6867  }
6868 
6869  // Check if totals match calculated values.
6870  VMA_VALIDATE(ValidateFreeSuballocationList());
6871  VMA_VALIDATE(calculatedOffset == GetSize());
6872  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6873  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6874 
6875  return true;
6876 }
6877 
6878 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6879 {
6880  if(!m_FreeSuballocationsBySize.empty())
6881  {
6882  return m_FreeSuballocationsBySize.back()->size;
6883  }
6884  else
6885  {
6886  return 0;
6887  }
6888 }
6889 
6890 bool VmaBlockMetadata_Generic::IsEmpty() const
6891 {
6892  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6893 }
6894 
6895 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6896 {
6897  outInfo.blockCount = 1;
6898 
6899  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6900  outInfo.allocationCount = rangeCount - m_FreeCount;
6901  outInfo.unusedRangeCount = m_FreeCount;
6902 
6903  outInfo.unusedBytes = m_SumFreeSize;
6904  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6905 
6906  outInfo.allocationSizeMin = UINT64_MAX;
6907  outInfo.allocationSizeMax = 0;
6908  outInfo.unusedRangeSizeMin = UINT64_MAX;
6909  outInfo.unusedRangeSizeMax = 0;
6910 
6911  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6912  suballocItem != m_Suballocations.cend();
6913  ++suballocItem)
6914  {
6915  const VmaSuballocation& suballoc = *suballocItem;
6916  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6917  {
6918  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6919  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6920  }
6921  else
6922  {
6923  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6924  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6925  }
6926  }
6927 }
6928 
6929 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6930 {
6931  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6932 
6933  inoutStats.size += GetSize();
6934  inoutStats.unusedSize += m_SumFreeSize;
6935  inoutStats.allocationCount += rangeCount - m_FreeCount;
6936  inoutStats.unusedRangeCount += m_FreeCount;
6937  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6938 }
6939 
6940 #if VMA_STATS_STRING_ENABLED
6941 
6942 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6943 {
6944  PrintDetailedMap_Begin(json,
6945  m_SumFreeSize, // unusedBytes
6946  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6947  m_FreeCount); // unusedRangeCount
6948 
6949  size_t i = 0;
6950  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6951  suballocItem != m_Suballocations.cend();
6952  ++suballocItem, ++i)
6953  {
6954  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6955  {
6956  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6957  }
6958  else
6959  {
6960  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6961  }
6962  }
6963 
6964  PrintDetailedMap_End(json);
6965 }
6966 
6967 #endif // #if VMA_STATS_STRING_ENABLED
6968 
6969 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6970  uint32_t currentFrameIndex,
6971  uint32_t frameInUseCount,
6972  VkDeviceSize bufferImageGranularity,
6973  VkDeviceSize allocSize,
6974  VkDeviceSize allocAlignment,
6975  bool upperAddress,
6976  VmaSuballocationType allocType,
6977  bool canMakeOtherLost,
6978  uint32_t strategy,
6979  VmaAllocationRequest* pAllocationRequest)
6980 {
6981  VMA_ASSERT(allocSize > 0);
6982  VMA_ASSERT(!upperAddress);
6983  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6984  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6985  VMA_HEAVY_ASSERT(Validate());
6986 
6987  // There is not enough total free space in this block to fulfill the request: Early return.
6988  if(canMakeOtherLost == false &&
6989  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6990  {
6991  return false;
6992  }
6993 
6994  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
6995  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6996  if(freeSuballocCount > 0)
6997  {
6998  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
6999  {
7000  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7001  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7002  m_FreeSuballocationsBySize.data(),
7003  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7004  allocSize + 2 * VMA_DEBUG_MARGIN,
7005  VmaSuballocationItemSizeLess());
7006  size_t index = it - m_FreeSuballocationsBySize.data();
7007  for(; index < freeSuballocCount; ++index)
7008  {
7009  if(CheckAllocation(
7010  currentFrameIndex,
7011  frameInUseCount,
7012  bufferImageGranularity,
7013  allocSize,
7014  allocAlignment,
7015  allocType,
7016  m_FreeSuballocationsBySize[index],
7017  false, // canMakeOtherLost
7018  &pAllocationRequest->offset,
7019  &pAllocationRequest->itemsToMakeLostCount,
7020  &pAllocationRequest->sumFreeSize,
7021  &pAllocationRequest->sumItemSize))
7022  {
7023  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7024  return true;
7025  }
7026  }
7027  }
7028  else // WORST_FIT, FIRST_FIT
7029  {
7030  // Search starting from the biggest suballocations.
7031  for(size_t index = freeSuballocCount; index--; )
7032  {
7033  if(CheckAllocation(
7034  currentFrameIndex,
7035  frameInUseCount,
7036  bufferImageGranularity,
7037  allocSize,
7038  allocAlignment,
7039  allocType,
7040  m_FreeSuballocationsBySize[index],
7041  false, // canMakeOtherLost
7042  &pAllocationRequest->offset,
7043  &pAllocationRequest->itemsToMakeLostCount,
7044  &pAllocationRequest->sumFreeSize,
7045  &pAllocationRequest->sumItemSize))
7046  {
7047  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7048  return true;
7049  }
7050  }
7051  }
7052  }
7053 
7054  if(canMakeOtherLost)
7055  {
7056  // Brute-force algorithm. TODO: Come up with something better.
7057 
7058  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7059  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7060 
7061  VmaAllocationRequest tmpAllocRequest = {};
7062  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7063  suballocIt != m_Suballocations.end();
7064  ++suballocIt)
7065  {
7066  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7067  suballocIt->hAllocation->CanBecomeLost())
7068  {
7069  if(CheckAllocation(
7070  currentFrameIndex,
7071  frameInUseCount,
7072  bufferImageGranularity,
7073  allocSize,
7074  allocAlignment,
7075  allocType,
7076  suballocIt,
7077  canMakeOtherLost,
7078  &tmpAllocRequest.offset,
7079  &tmpAllocRequest.itemsToMakeLostCount,
7080  &tmpAllocRequest.sumFreeSize,
7081  &tmpAllocRequest.sumItemSize))
7082  {
7083  tmpAllocRequest.item = suballocIt;
7084 
7085  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7086  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7087  {
7088  *pAllocationRequest = tmpAllocRequest;
7089  }
7090  }
7091  }
7092  }
7093 
7094  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7095  {
7096  return true;
7097  }
7098  }
7099 
7100  return false;
7101 }
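// Summary of the search above, per strategy. With free ranges registered by
// size, say {64, 256, 1024} (sizes illustrative), a request for 200 bytes:
//
//   BEST_FIT:              binary-search for the first range >= 200, so 256 is
//                          tried first, then 1024.
//   WORST_FIT / FIRST_FIT: iterate from the biggest range down, so 1024 is
//                          tried first.
//
// Only when canMakeOtherLost is true does the function fall back to the O(n)
// brute-force scan over all suballocations, comparing candidates by CalcCost().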
7102 
7103 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7104  uint32_t currentFrameIndex,
7105  uint32_t frameInUseCount,
7106  VmaAllocationRequest* pAllocationRequest)
7107 {
7108  while(pAllocationRequest->itemsToMakeLostCount > 0)
7109  {
7110  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7111  {
7112  ++pAllocationRequest->item;
7113  }
7114  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7115  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7116  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7117  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7118  {
7119  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7120  --pAllocationRequest->itemsToMakeLostCount;
7121  }
7122  else
7123  {
7124  return false;
7125  }
7126  }
7127 
7128  VMA_HEAVY_ASSERT(Validate());
7129  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7130  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7131 
7132  return true;
7133 }
7134 
7135 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7136 {
7137  uint32_t lostAllocationCount = 0;
7138  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7139  it != m_Suballocations.end();
7140  ++it)
7141  {
7142  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7143  it->hAllocation->CanBecomeLost() &&
7144  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7145  {
7146  it = FreeSuballocation(it);
7147  ++lostAllocationCount;
7148  }
7149  }
7150  return lostAllocationCount;
7151 }
7152 
7153 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7154 {
7155  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7156  it != m_Suballocations.end();
7157  ++it)
7158  {
7159  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7160  {
7161  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7162  {
7163  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7164  return VK_ERROR_VALIDATION_FAILED_EXT;
7165  }
7166  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7167  {
7168  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7169  return VK_ERROR_VALIDATION_FAILED_EXT;
7170  }
7171  }
7172  }
7173 
7174  return VK_SUCCESS;
7175 }
7176 
7177 void VmaBlockMetadata_Generic::Alloc(
7178  const VmaAllocationRequest& request,
7179  VmaSuballocationType type,
7180  VkDeviceSize allocSize,
7181  bool upperAddress,
7182  VmaAllocation hAllocation)
7183 {
7184  VMA_ASSERT(!upperAddress);
7185  VMA_ASSERT(request.item != m_Suballocations.end());
7186  VmaSuballocation& suballoc = *request.item;
7187  // Given suballocation is a free block.
7188  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7189  // Given offset is inside this suballocation.
7190  VMA_ASSERT(request.offset >= suballoc.offset);
7191  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7192  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7193  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7194 
7195  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7196  // it to become used.
7197  UnregisterFreeSuballocation(request.item);
7198 
7199  suballoc.offset = request.offset;
7200  suballoc.size = allocSize;
7201  suballoc.type = type;
7202  suballoc.hAllocation = hAllocation;
7203 
7204  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7205  if(paddingEnd)
7206  {
7207  VmaSuballocation paddingSuballoc = {};
7208  paddingSuballoc.offset = request.offset + allocSize;
7209  paddingSuballoc.size = paddingEnd;
7210  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7211  VmaSuballocationList::iterator next = request.item;
7212  ++next;
7213  const VmaSuballocationList::iterator paddingEndItem =
7214  m_Suballocations.insert(next, paddingSuballoc);
7215  RegisterFreeSuballocation(paddingEndItem);
7216  }
7217 
7218  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7219  if(paddingBegin)
7220  {
7221  VmaSuballocation paddingSuballoc = {};
7222  paddingSuballoc.offset = request.offset - paddingBegin;
7223  paddingSuballoc.size = paddingBegin;
7224  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7225  const VmaSuballocationList::iterator paddingBeginItem =
7226  m_Suballocations.insert(request.item, paddingSuballoc);
7227  RegisterFreeSuballocation(paddingBeginItem);
7228  }
7229 
7230  // Update totals.
7231  m_FreeCount = m_FreeCount - 1;
7232  if(paddingBegin > 0)
7233  {
7234  ++m_FreeCount;
7235  }
7236  if(paddingEnd > 0)
7237  {
7238  ++m_FreeCount;
7239  }
7240  m_SumFreeSize -= allocSize;
7241 }
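// Worked example of the split performed above: allocating allocSize = 192 with
// alignment 64 from a free suballocation at offset 100, size 1000, and
// VMA_DEBUG_MARGIN == 0 (all numbers illustrative):
//
//   request.offset = VmaAlignUp(100, 64) = 128
//   paddingBegin   = 128 - 100           = 28  -> new free range [100, 128)
//   paddingEnd     = 1000 - 28 - 192     = 780 -> new free range [320, 1100)
//
// m_FreeCount loses the consumed range and gains one entry per non-zero
// padding (net +1 here), and m_SumFreeSize drops by exactly allocSize.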
7242 
7243 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7244 {
7245  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7246  suballocItem != m_Suballocations.end();
7247  ++suballocItem)
7248  {
7249  VmaSuballocation& suballoc = *suballocItem;
7250  if(suballoc.hAllocation == allocation)
7251  {
7252  FreeSuballocation(suballocItem);
7253  VMA_HEAVY_ASSERT(Validate());
7254  return;
7255  }
7256  }
7257  VMA_ASSERT(0 && "Not found!");
7258 }
7259 
7260 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7261 {
7262  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7263  suballocItem != m_Suballocations.end();
7264  ++suballocItem)
7265  {
7266  VmaSuballocation& suballoc = *suballocItem;
7267  if(suballoc.offset == offset)
7268  {
7269  FreeSuballocation(suballocItem);
7270  return;
7271  }
7272  }
7273  VMA_ASSERT(0 && "Not found!");
7274 }
7275 
7276 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
7277 {
7278  typedef VmaSuballocationList::iterator iter_type;
7279  for(iter_type suballocItem = m_Suballocations.begin();
7280  suballocItem != m_Suballocations.end();
7281  ++suballocItem)
7282  {
7283  VmaSuballocation& suballoc = *suballocItem;
7284  if(suballoc.hAllocation == alloc)
7285  {
7286  iter_type nextItem = suballocItem;
7287  ++nextItem;
7288 
7289  // Should have been ensured at a higher level.
7290  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
7291 
7292  // Shrinking.
7293  if(newSize < alloc->GetSize())
7294  {
7295  const VkDeviceSize sizeDiff = suballoc.size - newSize;
7296 
7297  // There is next item.
7298  if(nextItem != m_Suballocations.end())
7299  {
7300  // Next item is free.
7301  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7302  {
7303  // Grow this next item backward.
7304  UnregisterFreeSuballocation(nextItem);
7305  nextItem->offset -= sizeDiff;
7306  nextItem->size += sizeDiff;
7307  RegisterFreeSuballocation(nextItem);
7308  }
7309  // Next item is not free.
7310  else
7311  {
7312  // Create free item after current one.
7313  VmaSuballocation newFreeSuballoc;
7314  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
7315  newFreeSuballoc.offset = suballoc.offset + newSize;
7316  newFreeSuballoc.size = sizeDiff;
7317  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7318  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
7319  RegisterFreeSuballocation(newFreeSuballocIt);
7320 
7321  ++m_FreeCount;
7322  }
7323  }
7324  // This is the last item.
7325  else
7326  {
7327  // Create free item at the end.
7328  VmaSuballocation newFreeSuballoc;
7329  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
7330  newFreeSuballoc.offset = suballoc.offset + newSize;
7331  newFreeSuballoc.size = sizeDiff;
7332  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7333  m_Suballocations.push_back(newFreeSuballoc);
7334 
7335  iter_type newFreeSuballocIt = m_Suballocations.end();
7336  RegisterFreeSuballocation(--newFreeSuballocIt);
7337 
7338  ++m_FreeCount;
7339  }
7340 
7341  suballoc.size = newSize;
7342  m_SumFreeSize += sizeDiff;
7343  }
7344  // Growing.
7345  else
7346  {
7347  const VkDeviceSize sizeDiff = newSize - suballoc.size;
7348 
7349  // There is next item.
7350  if(nextItem != m_Suballocations.end())
7351  {
7352  // Next item is free.
7353  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7354  {
7355  // There is not enough free space, including margin.
7356  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
7357  {
7358  return false;
7359  }
7360 
7361  // There is more free space than required.
7362  if(nextItem->size > sizeDiff)
7363  {
7364  // Move and shrink this next item.
7365  UnregisterFreeSuballocation(nextItem);
7366  nextItem->offset += sizeDiff;
7367  nextItem->size -= sizeDiff;
7368  RegisterFreeSuballocation(nextItem);
7369  }
7370  // There is exactly the amount of free space required.
7371  else
7372  {
7373  // Remove this next free item.
7374  UnregisterFreeSuballocation(nextItem);
7375  m_Suballocations.erase(nextItem);
7376  --m_FreeCount;
7377  }
7378  }
7379  // Next item is not free - there is no space to grow.
7380  else
7381  {
7382  return false;
7383  }
7384  }
7385  // This is the last item - there is no space to grow.
7386  else
7387  {
7388  return false;
7389  }
7390 
7391  suballoc.size = newSize;
7392  m_SumFreeSize -= sizeDiff;
7393  }
7394 
7395  // We cannot call Validate() here because the alloc object is updated to the new size outside of this call.
7396  return true;
7397  }
7398  }
7399  VMA_ASSERT(0 && "Not found!");
7400  return false;
7401 }
7402 
7403 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7404 {
7405  VkDeviceSize lastSize = 0;
7406  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7407  {
7408  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7409 
7410  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7411  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7412  VMA_VALIDATE(it->size >= lastSize);
7413  lastSize = it->size;
7414  }
7415  return true;
7416 }
7417 
7418 bool VmaBlockMetadata_Generic::CheckAllocation(
7419  uint32_t currentFrameIndex,
7420  uint32_t frameInUseCount,
7421  VkDeviceSize bufferImageGranularity,
7422  VkDeviceSize allocSize,
7423  VkDeviceSize allocAlignment,
7424  VmaSuballocationType allocType,
7425  VmaSuballocationList::const_iterator suballocItem,
7426  bool canMakeOtherLost,
7427  VkDeviceSize* pOffset,
7428  size_t* itemsToMakeLostCount,
7429  VkDeviceSize* pSumFreeSize,
7430  VkDeviceSize* pSumItemSize) const
7431 {
7432  VMA_ASSERT(allocSize > 0);
7433  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7434  VMA_ASSERT(suballocItem != m_Suballocations.cend());
7435  VMA_ASSERT(pOffset != VMA_NULL);
7436 
7437  *itemsToMakeLostCount = 0;
7438  *pSumFreeSize = 0;
7439  *pSumItemSize = 0;
7440 
7441  if(canMakeOtherLost)
7442  {
7443  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7444  {
7445  *pSumFreeSize = suballocItem->size;
7446  }
7447  else
7448  {
7449  if(suballocItem->hAllocation->CanBecomeLost() &&
7450  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7451  {
7452  ++*itemsToMakeLostCount;
7453  *pSumItemSize = suballocItem->size;
7454  }
7455  else
7456  {
7457  return false;
7458  }
7459  }
7460 
7461  // Remaining size is too small for this request: Early return.
7462  if(GetSize() - suballocItem->offset < allocSize)
7463  {
7464  return false;
7465  }
7466 
7467  // Start from offset equal to beginning of this suballocation.
7468  *pOffset = suballocItem->offset;
7469 
7470  // Apply VMA_DEBUG_MARGIN at the beginning.
7471  if(VMA_DEBUG_MARGIN > 0)
7472  {
7473  *pOffset += VMA_DEBUG_MARGIN;
7474  }
7475 
7476  // Apply alignment.
7477  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7478 
7479  // Check previous suballocations for BufferImageGranularity conflicts.
7480  // Make bigger alignment if necessary.
7481  if(bufferImageGranularity > 1)
7482  {
7483  bool bufferImageGranularityConflict = false;
7484  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7485  while(prevSuballocItem != m_Suballocations.cbegin())
7486  {
7487  --prevSuballocItem;
7488  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7489  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7490  {
7491  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7492  {
7493  bufferImageGranularityConflict = true;
7494  break;
7495  }
7496  }
7497  else
7498  // Already on previous page.
7499  break;
7500  }
7501  if(bufferImageGranularityConflict)
7502  {
7503  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7504  }
7505  }
7506 
7507  // Now that we have final *pOffset, check if we are past suballocItem.
7508  // If yes, return false - this function should be called for another suballocItem as starting point.
7509  if(*pOffset >= suballocItem->offset + suballocItem->size)
7510  {
7511  return false;
7512  }
7513 
7514  // Calculate padding at the beginning based on current offset.
7515  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
7516 
7517  // Calculate required margin at the end.
7518  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7519 
7520  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
7521  // Another early return check.
7522  if(suballocItem->offset + totalSize > GetSize())
7523  {
7524  return false;
7525  }
7526 
7527  // Advance lastSuballocItem until desired size is reached.
7528  // Update itemsToMakeLostCount.
7529  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
7530  if(totalSize > suballocItem->size)
7531  {
7532  VkDeviceSize remainingSize = totalSize - suballocItem->size;
7533  while(remainingSize > 0)
7534  {
7535  ++lastSuballocItem;
7536  if(lastSuballocItem == m_Suballocations.cend())
7537  {
7538  return false;
7539  }
7540  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7541  {
7542  *pSumFreeSize += lastSuballocItem->size;
7543  }
7544  else
7545  {
7546  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
7547  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
7548  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7549  {
7550  ++*itemsToMakeLostCount;
7551  *pSumItemSize += lastSuballocItem->size;
7552  }
7553  else
7554  {
7555  return false;
7556  }
7557  }
7558  remainingSize = (lastSuballocItem->size < remainingSize) ?
7559  remainingSize - lastSuballocItem->size : 0;
7560  }
7561  }
7562 
7563  // Check next suballocations for BufferImageGranularity conflicts.
7564  // If conflict exists, we must mark more allocations lost or fail.
7565  if(bufferImageGranularity > 1)
7566  {
7567  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
7568  ++nextSuballocItem;
7569  while(nextSuballocItem != m_Suballocations.cend())
7570  {
7571  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7572  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7573  {
7574  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7575  {
7576  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
7577  if(nextSuballoc.hAllocation->CanBecomeLost() &&
7578  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7579  {
7580  ++*itemsToMakeLostCount;
7581  }
7582  else
7583  {
7584  return false;
7585  }
7586  }
7587  }
7588  else
7589  {
7590  // Already on next page.
7591  break;
7592  }
7593  ++nextSuballocItem;
7594  }
7595  }
7596  }
7597  else
7598  {
7599  const VmaSuballocation& suballoc = *suballocItem;
7600  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7601 
7602  *pSumFreeSize = suballoc.size;
7603 
7604  // Size of this suballocation is too small for this request: Early return.
7605  if(suballoc.size < allocSize)
7606  {
7607  return false;
7608  }
7609 
7610  // Start from offset equal to beginning of this suballocation.
7611  *pOffset = suballoc.offset;
7612 
7613  // Apply VMA_DEBUG_MARGIN at the beginning.
7614  if(VMA_DEBUG_MARGIN > 0)
7615  {
7616  *pOffset += VMA_DEBUG_MARGIN;
7617  }
7618 
7619  // Apply alignment.
7620  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7621 
7622  // Check previous suballocations for BufferImageGranularity conflicts.
7623  // Make bigger alignment if necessary.
7624  if(bufferImageGranularity > 1)
7625  {
7626  bool bufferImageGranularityConflict = false;
7627  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7628  while(prevSuballocItem != m_Suballocations.cbegin())
7629  {
7630  --prevSuballocItem;
7631  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7632  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7633  {
7634  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7635  {
7636  bufferImageGranularityConflict = true;
7637  break;
7638  }
7639  }
7640  else
7641  // Already on previous page.
7642  break;
7643  }
7644  if(bufferImageGranularityConflict)
7645  {
7646  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7647  }
7648  }
7649 
7650  // Calculate padding at the beginning based on current offset.
7651  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
7652 
7653  // Calculate required margin at the end.
7654  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7655 
7656  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
7657  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
7658  {
7659  return false;
7660  }
7661 
7662  // Check next suballocations for BufferImageGranularity conflicts.
7663  // If conflict exists, allocation cannot be made here.
7664  if(bufferImageGranularity > 1)
7665  {
7666  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7667  ++nextSuballocItem;
7668  while(nextSuballocItem != m_Suballocations.cend())
7669  {
7670  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7671  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7672  {
7673  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7674  {
7675  return false;
7676  }
7677  }
7678  else
7679  {
7680  // Already on next page.
7681  break;
7682  }
7683  ++nextSuballocItem;
7684  }
7685  }
7686  }
7687 
7688  // All tests passed: Success. pOffset is already filled.
7689  return true;
7690 }
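// Worked example of the granularity handling above, assuming
// bufferImageGranularity == 4096: placing a linear BUFFER after an OPTIMAL
// image that ends at offset 5000, with allocAlignment == 256 (numbers
// illustrative):
//
//   *pOffset = VmaAlignUp(5000, 256) = 5120
//   VmaBlocksOnSamePage(imageOffset, imageSize, 5120, 4096)    -> true
//   VmaIsBufferImageGranularityConflict(IMAGE_OPTIMAL, BUFFER) -> true
//   *pOffset = VmaAlignUp(5120, 4096) = 8192   // pushed to the next page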
7691 
7692 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7693 {
7694  VMA_ASSERT(item != m_Suballocations.end());
7695  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7696 
7697  VmaSuballocationList::iterator nextItem = item;
7698  ++nextItem;
7699  VMA_ASSERT(nextItem != m_Suballocations.end());
7700  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7701 
7702  item->size += nextItem->size;
7703  --m_FreeCount;
7704  m_Suballocations.erase(nextItem);
7705 }
7706 
7707 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7708 {
7709  // Change this suballocation to be marked as free.
7710  VmaSuballocation& suballoc = *suballocItem;
7711  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7712  suballoc.hAllocation = VK_NULL_HANDLE;
7713 
7714  // Update totals.
7715  ++m_FreeCount;
7716  m_SumFreeSize += suballoc.size;
7717 
7718  // Merge with previous and/or next suballocation if it's also free.
7719  bool mergeWithNext = false;
7720  bool mergeWithPrev = false;
7721 
7722  VmaSuballocationList::iterator nextItem = suballocItem;
7723  ++nextItem;
7724  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7725  {
7726  mergeWithNext = true;
7727  }
7728 
7729  VmaSuballocationList::iterator prevItem = suballocItem;
7730  if(suballocItem != m_Suballocations.begin())
7731  {
7732  --prevItem;
7733  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7734  {
7735  mergeWithPrev = true;
7736  }
7737  }
7738 
7739  if(mergeWithNext)
7740  {
7741  UnregisterFreeSuballocation(nextItem);
7742  MergeFreeWithNext(suballocItem);
7743  }
7744 
7745  if(mergeWithPrev)
7746  {
7747  UnregisterFreeSuballocation(prevItem);
7748  MergeFreeWithNext(prevItem);
7749  RegisterFreeSuballocation(prevItem);
7750  return prevItem;
7751  }
7752  else
7753  {
7754  RegisterFreeSuballocation(suballocItem);
7755  return suballocItem;
7756  }
7757 }
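// Example of the coalescing above: freeing the used range in the sequence
// [free 100][used 200][free 50] first marks the 200-byte range free, merges
// the following 50-byte range into it, then merges the result into the
// preceding 100-byte range, leaving a single registered free suballocation of
// size 350.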
7758 
7759 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7760 {
7761  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7762  VMA_ASSERT(item->size > 0);
7763 
7764  // You may want to enable this validation at the beginning or at the end of
7765  // this function, depending on what you want to check.
7766  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7767 
7768  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7769  {
7770  if(m_FreeSuballocationsBySize.empty())
7771  {
7772  m_FreeSuballocationsBySize.push_back(item);
7773  }
7774  else
7775  {
7776  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7777  }
7778  }
7779 
7780  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7781 }
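// Ranges smaller than VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER stay in
// m_Suballocations but are deliberately kept out of the sorted vector, which
// keeps tiny fragments from bloating the best-fit binary search.
// VmaVectorInsertSorted is conceptually the classic sorted-insert idiom:
//
//   vec.insert(std::lower_bound(vec.begin(), vec.end(), item, cmp), item);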
7782 
7783 
7784 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7785 {
7786  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7787  VMA_ASSERT(item->size > 0);
7788 
7789  // You may want to enable this validation at the beginning or at the end of
7790  // this function, depending on what you want to check.
7791  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7792 
7793  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7794  {
7795  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7796  m_FreeSuballocationsBySize.data(),
7797  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7798  item,
7799  VmaSuballocationItemSizeLess());
7800  for(size_t index = it - m_FreeSuballocationsBySize.data();
7801  index < m_FreeSuballocationsBySize.size();
7802  ++index)
7803  {
7804  if(m_FreeSuballocationsBySize[index] == item)
7805  {
7806  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7807  return;
7808  }
7809  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7810  }
7811  VMA_ASSERT(0 && "Not found.");
7812  }
7813 
7814  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7815 }
7816 
7817 ////////////////////////////////////////////////////////////////////////////////
7818 // class VmaBlockMetadata_Linear
7819 
7820 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
7821  VmaBlockMetadata(hAllocator),
7822  m_SumFreeSize(0),
7823  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7824  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7825  m_1stVectorIndex(0),
7826  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7827  m_1stNullItemsBeginCount(0),
7828  m_1stNullItemsMiddleCount(0),
7829  m_2ndNullItemsCount(0)
7830 {
7831 }
7832 
7833 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
7834 {
7835 }
7836 
7837 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7838 {
7839  VmaBlockMetadata::Init(size);
7840  m_SumFreeSize = size;
7841 }
7842 
7843 bool VmaBlockMetadata_Linear::Validate() const
7844 {
7845  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7846  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7847 
7848  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7849  VMA_VALIDATE(!suballocations1st.empty() ||
7850  suballocations2nd.empty() ||
7851  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7852 
7853  if(!suballocations1st.empty())
7854  {
7855  // Null item at the beginning should be accounted for in m_1stNullItemsBeginCount.
7856  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
7857  // Null item at the end should be just pop_back().
7858  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
7859  }
7860  if(!suballocations2nd.empty())
7861  {
7862  // Null item at the end should be just pop_back().
7863  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
7864  }
7865 
7866  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7867  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7868 
7869  VkDeviceSize sumUsedSize = 0;
7870  const size_t suballoc1stCount = suballocations1st.size();
7871  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7872 
7873  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7874  {
7875  const size_t suballoc2ndCount = suballocations2nd.size();
7876  size_t nullItem2ndCount = 0;
7877  for(size_t i = 0; i < suballoc2ndCount; ++i)
7878  {
7879  const VmaSuballocation& suballoc = suballocations2nd[i];
7880  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7881 
7882  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7883  VMA_VALIDATE(suballoc.offset >= offset);
7884 
7885  if(!currFree)
7886  {
7887  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7888  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7889  sumUsedSize += suballoc.size;
7890  }
7891  else
7892  {
7893  ++nullItem2ndCount;
7894  }
7895 
7896  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7897  }
7898 
7899  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7900  }
7901 
7902  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7903  {
7904  const VmaSuballocation& suballoc = suballocations1st[i];
7905  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7906  suballoc.hAllocation == VK_NULL_HANDLE);
7907  }
7908 
7909  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7910 
7911  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7912  {
7913  const VmaSuballocation& suballoc = suballocations1st[i];
7914  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7915 
7916  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7917  VMA_VALIDATE(suballoc.offset >= offset);
7918  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7919 
7920  if(!currFree)
7921  {
7922  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7923  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7924  sumUsedSize += suballoc.size;
7925  }
7926  else
7927  {
7928  ++nullItem1stCount;
7929  }
7930 
7931  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7932  }
7933  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7934 
7935  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7936  {
7937  const size_t suballoc2ndCount = suballocations2nd.size();
7938  size_t nullItem2ndCount = 0;
7939  for(size_t i = suballoc2ndCount; i--; )
7940  {
7941  const VmaSuballocation& suballoc = suballocations2nd[i];
7942  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7943 
7944  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7945  VMA_VALIDATE(suballoc.offset >= offset);
7946 
7947  if(!currFree)
7948  {
7949  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7950  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7951  sumUsedSize += suballoc.size;
7952  }
7953  else
7954  {
7955  ++nullItem2ndCount;
7956  }
7957 
7958  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7959  }
7960 
7961  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7962  }
7963 
7964  VMA_VALIDATE(offset <= GetSize());
7965  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7966 
7967  return true;
7968 }
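// Block layout being validated above, for the three modes of the linear
// metadata (schematic; offsets grow left to right):
//
//   SECOND_VECTOR_EMPTY:        [ 1st vector allocations... | free         ]
//   SECOND_VECTOR_RING_BUFFER:  [ 2nd vector... | free | ...1st vector     ]
//   SECOND_VECTOR_DOUBLE_STACK: [ 1st vector -> |  free  | <- 2nd vector   ]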
7969 
7970 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7971 {
7972  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7973  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7974 }
7975 
7976 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7977 {
7978  const VkDeviceSize size = GetSize();
7979 
7980  /*
7981  We don't consider gaps inside allocation vectors with freed allocations because
7982  they are not suitable for reuse in the linear allocator. We consider only space that
7983  is available for new allocations.
7984  */
7985  if(IsEmpty())
7986  {
7987  return size;
7988  }
7989 
7990  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7991 
7992  switch(m_2ndVectorMode)
7993  {
7994  case SECOND_VECTOR_EMPTY:
7995  /*
7996  Available space is after end of 1st, as well as before beginning of 1st (which
7997  would make it a ring buffer).
7998  */
7999  {
8000  const size_t suballocations1stCount = suballocations1st.size();
8001  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8002  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8003  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8004  return VMA_MAX(
8005  firstSuballoc.offset,
8006  size - (lastSuballoc.offset + lastSuballoc.size));
8007  }
8008  break;
8009 
8010  case SECOND_VECTOR_RING_BUFFER:
8011  /*
8012  Available space is only between end of 2nd and beginning of 1st.
8013  */
8014  {
8015  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8016  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8017  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8018  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8019  }
8020  break;
8021 
8022  case SECOND_VECTOR_DOUBLE_STACK:
8023  /*
8024  Available space is only between end of 1st and top of 2nd.
8025  */
8026  {
8027  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8028  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8029  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8030  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8031  }
8032  break;
8033 
8034  default:
8035  VMA_ASSERT(0);
8036  return 0;
8037  }
8038 }
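// Numeric example for the DOUBLE_STACK case above (values illustrative): in a
// 1000-byte block where the last 1st-vector suballocation is {offset 200,
// size 100} and the top of the 2nd vector starts at offset 800, the largest
// range available for a new allocation is 800 - (200 + 100) = 500 bytes.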
8039 
8040 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8041 {
8042  const VkDeviceSize size = GetSize();
8043  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8044  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8045  const size_t suballoc1stCount = suballocations1st.size();
8046  const size_t suballoc2ndCount = suballocations2nd.size();
8047 
8048  outInfo.blockCount = 1;
8049  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8050  outInfo.unusedRangeCount = 0;
8051  outInfo.usedBytes = 0;
8052  outInfo.allocationSizeMin = UINT64_MAX;
8053  outInfo.allocationSizeMax = 0;
8054  outInfo.unusedRangeSizeMin = UINT64_MAX;
8055  outInfo.unusedRangeSizeMax = 0;
8056 
8057  VkDeviceSize lastOffset = 0;
8058 
8059  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8060  {
8061  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8062  size_t nextAlloc2ndIndex = 0;
8063  while(lastOffset < freeSpace2ndTo1stEnd)
8064  {
8065  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8066  while(nextAlloc2ndIndex < suballoc2ndCount &&
8067  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8068  {
8069  ++nextAlloc2ndIndex;
8070  }
8071 
8072  // Found non-null allocation.
8073  if(nextAlloc2ndIndex < suballoc2ndCount)
8074  {
8075  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8076 
8077  // 1. Process free space before this allocation.
8078  if(lastOffset < suballoc.offset)
8079  {
8080  // There is free space from lastOffset to suballoc.offset.
8081  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8082  ++outInfo.unusedRangeCount;
8083  outInfo.unusedBytes += unusedRangeSize;
8084  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8085  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8086  }
8087 
8088  // 2. Process this allocation.
8089  // There is allocation with suballoc.offset, suballoc.size.
8090  outInfo.usedBytes += suballoc.size;
8091  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8092  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8093 
8094  // 3. Prepare for next iteration.
8095  lastOffset = suballoc.offset + suballoc.size;
8096  ++nextAlloc2ndIndex;
8097  }
8098  // We are at the end.
8099  else
8100  {
8101  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8102  if(lastOffset < freeSpace2ndTo1stEnd)
8103  {
8104  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8105  ++outInfo.unusedRangeCount;
8106  outInfo.unusedBytes += unusedRangeSize;
8107  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8108  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8109  }
8110 
8111  // End of loop.
8112  lastOffset = freeSpace2ndTo1stEnd;
8113  }
8114  }
8115  }
8116 
8117  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8118  const VkDeviceSize freeSpace1stTo2ndEnd =
8119  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8120  while(lastOffset < freeSpace1stTo2ndEnd)
8121  {
8122  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8123  while(nextAlloc1stIndex < suballoc1stCount &&
8124  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8125  {
8126  ++nextAlloc1stIndex;
8127  }
8128 
8129  // Found non-null allocation.
8130  if(nextAlloc1stIndex < suballoc1stCount)
8131  {
8132  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8133 
8134  // 1. Process free space before this allocation.
8135  if(lastOffset < suballoc.offset)
8136  {
8137  // There is free space from lastOffset to suballoc.offset.
8138  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8139  ++outInfo.unusedRangeCount;
8140  outInfo.unusedBytes += unusedRangeSize;
8141  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8142  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8143  }
8144 
8145  // 2. Process this allocation.
8146  // There is allocation with suballoc.offset, suballoc.size.
8147  outInfo.usedBytes += suballoc.size;
8148  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8149  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8150 
8151  // 3. Prepare for next iteration.
8152  lastOffset = suballoc.offset + suballoc.size;
8153  ++nextAlloc1stIndex;
8154  }
8155  // We are at the end.
8156  else
8157  {
8158  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8159  if(lastOffset < freeSpace1stTo2ndEnd)
8160  {
8161  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8162  ++outInfo.unusedRangeCount;
8163  outInfo.unusedBytes += unusedRangeSize;
8164  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8165  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8166  }
8167 
8168  // End of loop.
8169  lastOffset = freeSpace1stTo2ndEnd;
8170  }
8171  }
8172 
8173  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8174  {
8175  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8176  while(lastOffset < size)
8177  {
8178  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8179  while(nextAlloc2ndIndex != SIZE_MAX &&
8180  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8181  {
8182  --nextAlloc2ndIndex;
8183  }
8184 
8185  // Found non-null allocation.
8186  if(nextAlloc2ndIndex != SIZE_MAX)
8187  {
8188  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8189 
8190  // 1. Process free space before this allocation.
8191  if(lastOffset < suballoc.offset)
8192  {
8193  // There is free space from lastOffset to suballoc.offset.
8194  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8195  ++outInfo.unusedRangeCount;
8196  outInfo.unusedBytes += unusedRangeSize;
8197  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8198  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8199  }
8200 
8201  // 2. Process this allocation.
8202  // There is allocation with suballoc.offset, suballoc.size.
8203  outInfo.usedBytes += suballoc.size;
8204  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8205  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8206 
8207  // 3. Prepare for next iteration.
8208  lastOffset = suballoc.offset + suballoc.size;
8209  --nextAlloc2ndIndex;
8210  }
8211  // We are at the end.
8212  else
8213  {
8214  // There is free space from lastOffset to size.
8215  if(lastOffset < size)
8216  {
8217  const VkDeviceSize unusedRangeSize = size - lastOffset;
8218  ++outInfo.unusedRangeCount;
8219  outInfo.unusedBytes += unusedRangeSize;
8220  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8221  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8222  }
8223 
8224  // End of loop.
8225  lastOffset = size;
8226  }
8227  }
8228  }
8229 
8230  outInfo.unusedBytes = size - outInfo.usedBytes;
8231 }
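// A minimal sketch (standalone, not library code) of the min/max folding used
// by the statistics walk above: min accumulators are seeded with the largest
// possible value and max accumulators with 0 (as in CalcAllocationStatInfo's
// initialization), so the first observed range always replaces the seed.
//
//   VkDeviceSize rangeSizeMin = UINT64_MAX, rangeSizeMax = 0;
//   for(VkDeviceSize rangeSize : observedRanges) // hypothetical input
//   {
//       rangeSizeMin = VMA_MIN(rangeSizeMin, rangeSize);
//       rangeSizeMax = VMA_MAX(rangeSizeMax, rangeSize);
//   }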
8232 
8233 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8234 {
8235  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8236  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8237  const VkDeviceSize size = GetSize();
8238  const size_t suballoc1stCount = suballocations1st.size();
8239  const size_t suballoc2ndCount = suballocations2nd.size();
8240 
8241  inoutStats.size += size;
8242 
8243  VkDeviceSize lastOffset = 0;
8244 
8245  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8246  {
8247  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8248  size_t nextAlloc2ndIndex = 0;
8249  while(lastOffset < freeSpace2ndTo1stEnd)
8250  {
8251  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8252  while(nextAlloc2ndIndex < suballoc2ndCount &&
8253  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8254  {
8255  ++nextAlloc2ndIndex;
8256  }
8257 
8258  // Found non-null allocation.
8259  if(nextAlloc2ndIndex < suballoc2ndCount)
8260  {
8261  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8262 
8263  // 1. Process free space before this allocation.
8264  if(lastOffset < suballoc.offset)
8265  {
8266  // There is free space from lastOffset to suballoc.offset.
8267  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8268  inoutStats.unusedSize += unusedRangeSize;
8269  ++inoutStats.unusedRangeCount;
8270  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8271  }
8272 
8273  // 2. Process this allocation.
8274  // There is allocation with suballoc.offset, suballoc.size.
8275  ++inoutStats.allocationCount;
8276 
8277  // 3. Prepare for next iteration.
8278  lastOffset = suballoc.offset + suballoc.size;
8279  ++nextAlloc2ndIndex;
8280  }
8281  // We are at the end.
8282  else
8283  {
8284  if(lastOffset < freeSpace2ndTo1stEnd)
8285  {
8286  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8287  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8288  inoutStats.unusedSize += unusedRangeSize;
8289  ++inoutStats.unusedRangeCount;
8290  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8291  }
8292 
8293  // End of loop.
8294  lastOffset = freeSpace2ndTo1stEnd;
8295  }
8296  }
8297  }
8298 
8299  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8300  const VkDeviceSize freeSpace1stTo2ndEnd =
8301  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8302  while(lastOffset < freeSpace1stTo2ndEnd)
8303  {
8304  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8305  while(nextAlloc1stIndex < suballoc1stCount &&
8306  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8307  {
8308  ++nextAlloc1stIndex;
8309  }
8310 
8311  // Found non-null allocation.
8312  if(nextAlloc1stIndex < suballoc1stCount)
8313  {
8314  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8315 
8316  // 1. Process free space before this allocation.
8317  if(lastOffset < suballoc.offset)
8318  {
8319  // There is free space from lastOffset to suballoc.offset.
8320  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8321  inoutStats.unusedSize += unusedRangeSize;
8322  ++inoutStats.unusedRangeCount;
8323  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8324  }
8325 
8326  // 2. Process this allocation.
8327  // There is allocation with suballoc.offset, suballoc.size.
8328  ++inoutStats.allocationCount;
8329 
8330  // 3. Prepare for next iteration.
8331  lastOffset = suballoc.offset + suballoc.size;
8332  ++nextAlloc1stIndex;
8333  }
8334  // We are at the end.
8335  else
8336  {
8337  if(lastOffset < freeSpace1stTo2ndEnd)
8338  {
8339  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8340  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8341  inoutStats.unusedSize += unusedRangeSize;
8342  ++inoutStats.unusedRangeCount;
8343  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8344  }
8345 
8346  // End of loop.
8347  lastOffset = freeSpace1stTo2ndEnd;
8348  }
8349  }
8350 
8351  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8352  {
8353  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8354  while(lastOffset < size)
8355  {
8356  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning (to SIZE_MAX).
8357  while(nextAlloc2ndIndex != SIZE_MAX &&
8358  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8359  {
8360  --nextAlloc2ndIndex;
8361  }
8362 
8363  // Found non-null allocation.
8364  if(nextAlloc2ndIndex != SIZE_MAX)
8365  {
8366  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8367 
8368  // 1. Process free space before this allocation.
8369  if(lastOffset < suballoc.offset)
8370  {
8371  // There is free space from lastOffset to suballoc.offset.
8372  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8373  inoutStats.unusedSize += unusedRangeSize;
8374  ++inoutStats.unusedRangeCount;
8375  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8376  }
8377 
8378  // 2. Process this allocation.
8379  // There is allocation with suballoc.offset, suballoc.size.
8380  ++inoutStats.allocationCount;
8381 
8382  // 3. Prepare for next iteration.
8383  lastOffset = suballoc.offset + suballoc.size;
8384  --nextAlloc2ndIndex;
8385  }
8386  // We are at the end.
8387  else
8388  {
8389  if(lastOffset < size)
8390  {
8391  // There is free space from lastOffset to size.
8392  const VkDeviceSize unusedRangeSize = size - lastOffset;
8393  inoutStats.unusedSize += unusedRangeSize;
8394  ++inoutStats.unusedRangeCount;
8395  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8396  }
8397 
8398  // End of loop.
8399  lastOffset = size;
8400  }
8401  }
8402  }
8403 }
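// Block layout walked by the statistics functions above (sketch; offsets grow
// to the right):
//
//   SECOND_VECTOR_RING_BUFFER:
//     | 2nd vector allocs | free | 1st vector allocs | free |
//     0                                               GetSize()
//   SECOND_VECTOR_DOUBLE_STACK:
//     | 1st vector allocs | free | 2nd vector allocs (pushed downward) |
//     0                                                        GetSize()
//
// Hence the up-to-three passes: the 2nd vector's ring part first, then the
// 1st vector, then the 2nd vector's upper-stack part.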
8404 
8405 #if VMA_STATS_STRING_ENABLED
8406 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8407 {
8408  const VkDeviceSize size = GetSize();
8409  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8410  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8411  const size_t suballoc1stCount = suballocations1st.size();
8412  const size_t suballoc2ndCount = suballocations2nd.size();
8413 
8414  // FIRST PASS
8415 
8416  size_t unusedRangeCount = 0;
8417  VkDeviceSize usedBytes = 0;
8418 
8419  VkDeviceSize lastOffset = 0;
8420 
8421  size_t alloc2ndCount = 0;
8422  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8423  {
8424  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8425  size_t nextAlloc2ndIndex = 0;
8426  while(lastOffset < freeSpace2ndTo1stEnd)
8427  {
8428  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8429  while(nextAlloc2ndIndex < suballoc2ndCount &&
8430  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8431  {
8432  ++nextAlloc2ndIndex;
8433  }
8434 
8435  // Found non-null allocation.
8436  if(nextAlloc2ndIndex < suballoc2ndCount)
8437  {
8438  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8439 
8440  // 1. Process free space before this allocation.
8441  if(lastOffset < suballoc.offset)
8442  {
8443  // There is free space from lastOffset to suballoc.offset.
8444  ++unusedRangeCount;
8445  }
8446 
8447  // 2. Process this allocation.
8448  // There is allocation with suballoc.offset, suballoc.size.
8449  ++alloc2ndCount;
8450  usedBytes += suballoc.size;
8451 
8452  // 3. Prepare for next iteration.
8453  lastOffset = suballoc.offset + suballoc.size;
8454  ++nextAlloc2ndIndex;
8455  }
8456  // We are at the end.
8457  else
8458  {
8459  if(lastOffset < freeSpace2ndTo1stEnd)
8460  {
8461  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8462  ++unusedRangeCount;
8463  }
8464 
8465  // End of loop.
8466  lastOffset = freeSpace2ndTo1stEnd;
8467  }
8468  }
8469  }
8470 
8471  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8472  size_t alloc1stCount = 0;
8473  const VkDeviceSize freeSpace1stTo2ndEnd =
8474  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8475  while(lastOffset < freeSpace1stTo2ndEnd)
8476  {
8477  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8478  while(nextAlloc1stIndex < suballoc1stCount &&
8479  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8480  {
8481  ++nextAlloc1stIndex;
8482  }
8483 
8484  // Found non-null allocation.
8485  if(nextAlloc1stIndex < suballoc1stCount)
8486  {
8487  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8488 
8489  // 1. Process free space before this allocation.
8490  if(lastOffset < suballoc.offset)
8491  {
8492  // There is free space from lastOffset to suballoc.offset.
8493  ++unusedRangeCount;
8494  }
8495 
8496  // 2. Process this allocation.
8497  // There is allocation with suballoc.offset, suballoc.size.
8498  ++alloc1stCount;
8499  usedBytes += suballoc.size;
8500 
8501  // 3. Prepare for next iteration.
8502  lastOffset = suballoc.offset + suballoc.size;
8503  ++nextAlloc1stIndex;
8504  }
8505  // We are at the end.
8506  else
8507  {
8508  if(lastOffset < freeSpace1stTo2ndEnd)
8509  {
8510  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8511  ++unusedRangeCount;
8512  }
8513 
8514  // End of loop.
8515  lastOffset = freeSpace1stTo2ndEnd;
8516  }
8517  }
8518 
8519  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8520  {
8521  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8522  while(lastOffset < size)
8523  {
8524  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning (to SIZE_MAX).
8525  while(nextAlloc2ndIndex != SIZE_MAX &&
8526  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8527  {
8528  --nextAlloc2ndIndex;
8529  }
8530 
8531  // Found non-null allocation.
8532  if(nextAlloc2ndIndex != SIZE_MAX)
8533  {
8534  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8535 
8536  // 1. Process free space before this allocation.
8537  if(lastOffset < suballoc.offset)
8538  {
8539  // There is free space from lastOffset to suballoc.offset.
8540  ++unusedRangeCount;
8541  }
8542 
8543  // 2. Process this allocation.
8544  // There is allocation with suballoc.offset, suballoc.size.
8545  ++alloc2ndCount;
8546  usedBytes += suballoc.size;
8547 
8548  // 3. Prepare for next iteration.
8549  lastOffset = suballoc.offset + suballoc.size;
8550  --nextAlloc2ndIndex;
8551  }
8552  // We are at the end.
8553  else
8554  {
8555  if(lastOffset < size)
8556  {
8557  // There is free space from lastOffset to size.
8558  ++unusedRangeCount;
8559  }
8560 
8561  // End of loop.
8562  lastOffset = size;
8563  }
8564  }
8565  }
8566 
8567  const VkDeviceSize unusedBytes = size - usedBytes;
8568  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8569 
8570  // SECOND PASS
8571  lastOffset = 0;
8572 
8573  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8574  {
8575  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8576  size_t nextAlloc2ndIndex = 0;
8577  while(lastOffset < freeSpace2ndTo1stEnd)
8578  {
8579  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8580  while(nextAlloc2ndIndex < suballoc2ndCount &&
8581  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8582  {
8583  ++nextAlloc2ndIndex;
8584  }
8585 
8586  // Found non-null allocation.
8587  if(nextAlloc2ndIndex < suballoc2ndCount)
8588  {
8589  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8590 
8591  // 1. Process free space before this allocation.
8592  if(lastOffset < suballoc.offset)
8593  {
8594  // There is free space from lastOffset to suballoc.offset.
8595  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8596  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8597  }
8598 
8599  // 2. Process this allocation.
8600  // There is allocation with suballoc.offset, suballoc.size.
8601  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8602 
8603  // 3. Prepare for next iteration.
8604  lastOffset = suballoc.offset + suballoc.size;
8605  ++nextAlloc2ndIndex;
8606  }
8607  // We are at the end.
8608  else
8609  {
8610  if(lastOffset < freeSpace2ndTo1stEnd)
8611  {
8612  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8613  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8614  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8615  }
8616 
8617  // End of loop.
8618  lastOffset = freeSpace2ndTo1stEnd;
8619  }
8620  }
8621  }
8622 
8623  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8624  while(lastOffset < freeSpace1stTo2ndEnd)
8625  {
8626  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8627  while(nextAlloc1stIndex < suballoc1stCount &&
8628  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8629  {
8630  ++nextAlloc1stIndex;
8631  }
8632 
8633  // Found non-null allocation.
8634  if(nextAlloc1stIndex < suballoc1stCount)
8635  {
8636  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8637 
8638  // 1. Process free space before this allocation.
8639  if(lastOffset < suballoc.offset)
8640  {
8641  // There is free space from lastOffset to suballoc.offset.
8642  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8643  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8644  }
8645 
8646  // 2. Process this allocation.
8647  // There is allocation with suballoc.offset, suballoc.size.
8648  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8649 
8650  // 3. Prepare for next iteration.
8651  lastOffset = suballoc.offset + suballoc.size;
8652  ++nextAlloc1stIndex;
8653  }
8654  // We are at the end.
8655  else
8656  {
8657  if(lastOffset < freeSpace1stTo2ndEnd)
8658  {
8659  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8660  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8661  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8662  }
8663 
8664  // End of loop.
8665  lastOffset = freeSpace1stTo2ndEnd;
8666  }
8667  }
8668 
8669  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8670  {
8671  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8672  while(lastOffset < size)
8673  {
8674  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning (to SIZE_MAX).
8675  while(nextAlloc2ndIndex != SIZE_MAX &&
8676  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8677  {
8678  --nextAlloc2ndIndex;
8679  }
8680 
8681  // Found non-null allocation.
8682  if(nextAlloc2ndIndex != SIZE_MAX)
8683  {
8684  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8685 
8686  // 1. Process free space before this allocation.
8687  if(lastOffset < suballoc.offset)
8688  {
8689  // There is free space from lastOffset to suballoc.offset.
8690  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8691  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8692  }
8693 
8694  // 2. Process this allocation.
8695  // There is allocation with suballoc.offset, suballoc.size.
8696  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8697 
8698  // 3. Prepare for next iteration.
8699  lastOffset = suballoc.offset + suballoc.size;
8700  --nextAlloc2ndIndex;
8701  }
8702  // We are at the end.
8703  else
8704  {
8705  if(lastOffset < size)
8706  {
8707  // There is free space from lastOffset to size.
8708  const VkDeviceSize unusedRangeSize = size - lastOffset;
8709  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8710  }
8711 
8712  // End of loop.
8713  lastOffset = size;
8714  }
8715  }
8716  }
8717 
8718  PrintDetailedMap_End(json);
8719 }
8720 #endif // #if VMA_STATS_STRING_ENABLED
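// Note: PrintDetailedMap() above deliberately walks the block twice. The JSON
// preamble written by PrintDetailedMap_Begin() needs the totals (unused bytes,
// allocation count, unused-range count) before the first per-range entry can
// be emitted, so the first pass only counts and the second pass emits.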
8721 
8722 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8723  uint32_t currentFrameIndex,
8724  uint32_t frameInUseCount,
8725  VkDeviceSize bufferImageGranularity,
8726  VkDeviceSize allocSize,
8727  VkDeviceSize allocAlignment,
8728  bool upperAddress,
8729  VmaSuballocationType allocType,
8730  bool canMakeOtherLost,
8731  uint32_t strategy,
8732  VmaAllocationRequest* pAllocationRequest)
8733 {
8734  VMA_ASSERT(allocSize > 0);
8735  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8736  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8737  VMA_HEAVY_ASSERT(Validate());
8738 
8739  const VkDeviceSize size = GetSize();
8740  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8741  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8742 
8743  if(upperAddress)
8744  {
8745  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8746  {
8747  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8748  return false;
8749  }
8750 
8751  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8752  if(allocSize > size)
8753  {
8754  return false;
8755  }
8756  VkDeviceSize resultBaseOffset = size - allocSize;
8757  if(!suballocations2nd.empty())
8758  {
8759  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8760  resultBaseOffset = lastSuballoc.offset - allocSize;
8761  if(allocSize > lastSuballoc.offset)
8762  {
8763  return false;
8764  }
8765  }
8766 
8767  // Start from offset equal to end of free space.
8768  VkDeviceSize resultOffset = resultBaseOffset;
8769 
8770  // Apply VMA_DEBUG_MARGIN at the end.
8771  if(VMA_DEBUG_MARGIN > 0)
8772  {
8773  if(resultOffset < VMA_DEBUG_MARGIN)
8774  {
8775  return false;
8776  }
8777  resultOffset -= VMA_DEBUG_MARGIN;
8778  }
8779 
8780  // Apply alignment.
8781  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8782 
8783  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8784  // Make bigger alignment if necessary.
8785  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8786  {
8787  bool bufferImageGranularityConflict = false;
8788  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8789  {
8790  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8791  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8792  {
8793  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8794  {
8795  bufferImageGranularityConflict = true;
8796  break;
8797  }
8798  }
8799  else
8800  // Already on next page.
8801  break;
8802  }
8803  if(bufferImageGranularityConflict)
8804  {
8805  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8806  }
8807  }
8808 
8809  // There is enough free space.
8810  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8811  suballocations1st.back().offset + suballocations1st.back().size :
8812  0;
8813  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8814  {
8815  // Check previous suballocations for BufferImageGranularity conflicts.
8816  // If conflict exists, allocation cannot be made here.
8817  if(bufferImageGranularity > 1)
8818  {
8819  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8820  {
8821  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8822  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8823  {
8824  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8825  {
8826  return false;
8827  }
8828  }
8829  else
8830  {
8831  // Already on next page.
8832  break;
8833  }
8834  }
8835  }
8836 
8837  // All tests passed: Success.
8838  pAllocationRequest->offset = resultOffset;
8839  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8840  pAllocationRequest->sumItemSize = 0;
8841  // pAllocationRequest->item unused.
8842  pAllocationRequest->itemsToMakeLostCount = 0;
8843  return true;
8844  }
8845  }
8846  else // !upperAddress
8847  {
8848  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8849  {
8850  // Try to allocate at the end of 1st vector.
8851 
8852  VkDeviceSize resultBaseOffset = 0;
8853  if(!suballocations1st.empty())
8854  {
8855  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8856  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8857  }
8858 
8859  // Start from offset equal to beginning of free space.
8860  VkDeviceSize resultOffset = resultBaseOffset;
8861 
8862  // Apply VMA_DEBUG_MARGIN at the beginning.
8863  if(VMA_DEBUG_MARGIN > 0)
8864  {
8865  resultOffset += VMA_DEBUG_MARGIN;
8866  }
8867 
8868  // Apply alignment.
8869  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8870 
8871  // Check previous suballocations for BufferImageGranularity conflicts.
8872  // Make bigger alignment if necessary.
8873  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8874  {
8875  bool bufferImageGranularityConflict = false;
8876  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8877  {
8878  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8879  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8880  {
8881  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8882  {
8883  bufferImageGranularityConflict = true;
8884  break;
8885  }
8886  }
8887  else
8888  // Already on previous page.
8889  break;
8890  }
8891  if(bufferImageGranularityConflict)
8892  {
8893  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8894  }
8895  }
8896 
8897  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8898  suballocations2nd.back().offset : size;
8899 
8900  // There is enough free space at the end after alignment.
8901  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8902  {
8903  // Check next suballocations for BufferImageGranularity conflicts.
8904  // If conflict exists, allocation cannot be made here.
8905  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8906  {
8907  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8908  {
8909  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8910  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8911  {
8912  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8913  {
8914  return false;
8915  }
8916  }
8917  else
8918  {
8919  // Already on next page.
8920  break;
8921  }
8922  }
8923  }
8924 
8925  // All tests passed: Success.
8926  pAllocationRequest->offset = resultOffset;
8927  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8928  pAllocationRequest->sumItemSize = 0;
8929  // pAllocationRequest->item unused.
8930  pAllocationRequest->itemsToMakeLostCount = 0;
8931  return true;
8932  }
8933  }
8934 
8935  // Wrap around: try to allocate at the end of the 2nd vector (near the beginning
8936  // of the block), treating the first allocation of the 1st vector as the end of free space.
8937  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8938  {
8939  VMA_ASSERT(!suballocations1st.empty());
8940 
8941  VkDeviceSize resultBaseOffset = 0;
8942  if(!suballocations2nd.empty())
8943  {
8944  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8945  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8946  }
8947 
8948  // Start from offset equal to beginning of free space.
8949  VkDeviceSize resultOffset = resultBaseOffset;
8950 
8951  // Apply VMA_DEBUG_MARGIN at the beginning.
8952  if(VMA_DEBUG_MARGIN > 0)
8953  {
8954  resultOffset += VMA_DEBUG_MARGIN;
8955  }
8956 
8957  // Apply alignment.
8958  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8959 
8960  // Check previous suballocations for BufferImageGranularity conflicts.
8961  // Make bigger alignment if necessary.
8962  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8963  {
8964  bool bufferImageGranularityConflict = false;
8965  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8966  {
8967  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8968  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8969  {
8970  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8971  {
8972  bufferImageGranularityConflict = true;
8973  break;
8974  }
8975  }
8976  else
8977  // Already on previous page.
8978  break;
8979  }
8980  if(bufferImageGranularityConflict)
8981  {
8982  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8983  }
8984  }
8985 
8986  pAllocationRequest->itemsToMakeLostCount = 0;
8987  pAllocationRequest->sumItemSize = 0;
8988  size_t index1st = m_1stNullItemsBeginCount;
8989 
8990  if(canMakeOtherLost)
8991  {
8992  while(index1st < suballocations1st.size() &&
8993  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8994  {
8995  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8996  const VmaSuballocation& suballoc = suballocations1st[index1st];
8997  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8998  {
8999  // No problem.
9000  }
9001  else
9002  {
9003  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9004  if(suballoc.hAllocation->CanBecomeLost() &&
9005  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9006  {
9007  ++pAllocationRequest->itemsToMakeLostCount;
9008  pAllocationRequest->sumItemSize += suballoc.size;
9009  }
9010  else
9011  {
9012  return false;
9013  }
9014  }
9015  ++index1st;
9016  }
9017 
9018  // Check next suballocations for BufferImageGranularity conflicts.
9019  // If conflict exists, we must mark more allocations lost or fail.
9020  if(bufferImageGranularity > 1)
9021  {
9022  while(index1st < suballocations1st.size())
9023  {
9024  const VmaSuballocation& suballoc = suballocations1st[index1st];
9025  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9026  {
9027  if(suballoc.hAllocation != VK_NULL_HANDLE)
9028  {
9029  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9030  if(suballoc.hAllocation->CanBecomeLost() &&
9031  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9032  {
9033  ++pAllocationRequest->itemsToMakeLostCount;
9034  pAllocationRequest->sumItemSize += suballoc.size;
9035  }
9036  else
9037  {
9038  return false;
9039  }
9040  }
9041  }
9042  else
9043  {
9044  // Already on next page.
9045  break;
9046  }
9047  ++index1st;
9048  }
9049  }
9050  }
9051 
9052  // There is enough free space at the end after alignment.
9053  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9054  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9055  {
9056  // Check next suballocations for BufferImageGranularity conflicts.
9057  // If conflict exists, allocation cannot be made here.
9058  if(bufferImageGranularity > 1)
9059  {
9060  for(size_t nextSuballocIndex = index1st;
9061  nextSuballocIndex < suballocations1st.size();
9062  nextSuballocIndex++)
9063  {
9064  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9065  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9066  {
9067  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9068  {
9069  return false;
9070  }
9071  }
9072  else
9073  {
9074  // Already on next page.
9075  break;
9076  }
9077  }
9078  }
9079 
9080  // All tests passed: Success.
9081  pAllocationRequest->offset = resultOffset;
9082  pAllocationRequest->sumFreeSize =
9083  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9084  - resultBaseOffset
9085  - pAllocationRequest->sumItemSize;
9086  // pAllocationRequest->item unused.
9087  return true;
9088  }
9089  }
9090  }
9091 
9092  return false;
9093 }
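// The offset rounding used above, as a minimal sketch (the library defines its
// own VmaAlignUp()/VmaAlignDown() elsewhere in this file; this illustrative
// version assumes a power-of-2 alignment):
//
//   VkDeviceSize AlignUpSketch(VkDeviceSize v, VkDeviceSize a)
//   {
//       return (v + a - 1) & ~(a - 1); // round up to a multiple of a
//   }
//   VkDeviceSize AlignDownSketch(VkDeviceSize v, VkDeviceSize a)
//   {
//       return v & ~(a - 1); // round down to a multiple of a
//   }
//
// Upper-address requests (the stack growing downward) round the candidate
// offset down; lower-address requests round it up - in both cases moving away
// from the space already occupied.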
9094 
9095 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9096  uint32_t currentFrameIndex,
9097  uint32_t frameInUseCount,
9098  VmaAllocationRequest* pAllocationRequest)
9099 {
9100  if(pAllocationRequest->itemsToMakeLostCount == 0)
9101  {
9102  return true;
9103  }
9104 
9105  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9106 
9107  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9108  size_t index1st = m_1stNullItemsBeginCount;
9109  size_t madeLostCount = 0;
9110  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9111  {
9112  VMA_ASSERT(index1st < suballocations1st.size());
9113  VmaSuballocation& suballoc = suballocations1st[index1st];
9114  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9115  {
9116  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9117  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
9118  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9119  {
9120  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9121  suballoc.hAllocation = VK_NULL_HANDLE;
9122  m_SumFreeSize += suballoc.size;
9123  ++m_1stNullItemsMiddleCount;
9124  ++madeLostCount;
9125  }
9126  else
9127  {
9128  return false;
9129  }
9130  }
9131  ++index1st;
9132  }
9133 
9134  CleanupAfterFree();
9135  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
9136 
9137  return true;
9138 }
9139 
9140 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9141 {
9142  uint32_t lostAllocationCount = 0;
9143 
9144  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9145  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9146  {
9147  VmaSuballocation& suballoc = suballocations1st[i];
9148  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9149  suballoc.hAllocation->CanBecomeLost() &&
9150  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9151  {
9152  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9153  suballoc.hAllocation = VK_NULL_HANDLE;
9154  ++m_1stNullItemsMiddleCount;
9155  m_SumFreeSize += suballoc.size;
9156  ++lostAllocationCount;
9157  }
9158  }
9159 
9160  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9161  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9162  {
9163  VmaSuballocation& suballoc = suballocations2nd[i];
9164  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9165  suballoc.hAllocation->CanBecomeLost() &&
9166  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9167  {
9168  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9169  suballoc.hAllocation = VK_NULL_HANDLE;
9170  ++m_2ndNullItemsCount;
9171  ++lostAllocationCount;
9172  }
9173  }
9174 
9175  if(lostAllocationCount)
9176  {
9177  CleanupAfterFree();
9178  }
9179 
9180  return lostAllocationCount;
9181 }
9182 
9183 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9184 {
9185  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9186  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9187  {
9188  const VmaSuballocation& suballoc = suballocations1st[i];
9189  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9190  {
9191  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9192  {
9193  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9194  return VK_ERROR_VALIDATION_FAILED_EXT;
9195  }
9196  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9197  {
9198  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9199  return VK_ERROR_VALIDATION_FAILED_EXT;
9200  }
9201  }
9202  }
9203 
9204  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9205  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9206  {
9207  const VmaSuballocation& suballoc = suballocations2nd[i];
9208  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9209  {
9210  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9211  {
9212  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9213  return VK_ERROR_VALIDATION_FAILED_EXT;
9214  }
9215  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9216  {
9217  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9218  return VK_ERROR_VALIDATION_FAILED_EXT;
9219  }
9220  }
9221  }
9222 
9223  return VK_SUCCESS;
9224 }
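// Margin layout validated by CheckCorruption() above (sketch; the magic value
// and VmaValidateMagicValue() are defined elsewhere in this file):
//
//   | ... | magic | allocation [offset, offset+size) | magic | ... |
//           ^ offset - VMA_DEBUG_MARGIN                ^ offset + size
//
// When corruption detection is enabled, a known pattern is written into the
// debug margin on both sides of each allocation; an overwritten pattern on
// either side is reported as VK_ERROR_VALIDATION_FAILED_EXT.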
9225 
9226 void VmaBlockMetadata_Linear::Alloc(
9227  const VmaAllocationRequest& request,
9228  VmaSuballocationType type,
9229  VkDeviceSize allocSize,
9230  bool upperAddress,
9231  VmaAllocation hAllocation)
9232 {
9233  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9234 
9235  if(upperAddress)
9236  {
9237  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9238  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9239  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9240  suballocations2nd.push_back(newSuballoc);
9241  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9242  }
9243  else
9244  {
9245  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9246 
9247  // First allocation.
9248  if(suballocations1st.empty())
9249  {
9250  suballocations1st.push_back(newSuballoc);
9251  }
9252  else
9253  {
9254  // New allocation at the end of 1st vector.
9255  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9256  {
9257  // Check if it fits before the end of the block.
9258  VMA_ASSERT(request.offset + allocSize <= GetSize());
9259  suballocations1st.push_back(newSuballoc);
9260  }
9261  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
9262  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
9263  {
9264  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9265 
9266  switch(m_2ndVectorMode)
9267  {
9268  case SECOND_VECTOR_EMPTY:
9269  // First allocation from second part ring buffer.
9270  VMA_ASSERT(suballocations2nd.empty());
9271  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
9272  break;
9273  case SECOND_VECTOR_RING_BUFFER:
9274  // 2-part ring buffer is already started.
9275  VMA_ASSERT(!suballocations2nd.empty());
9276  break;
9277  case SECOND_VECTOR_DOUBLE_STACK:
9278  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
9279  break;
9280  default:
9281  VMA_ASSERT(0);
9282  }
9283 
9284  suballocations2nd.push_back(newSuballoc);
9285  }
9286  else
9287  {
9288  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
9289  }
9290  }
9291  }
9292 
9293  m_SumFreeSize -= newSuballoc.size;
9294 }
9295 
9296 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9297 {
9298  FreeAtOffset(allocation->GetOffset());
9299 }
9300 
9301 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
9302 {
9303  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9304  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9305 
9306  if(!suballocations1st.empty())
9307  {
9308  // The freed suballocation is the first one: mark it free and extend the run of null items at the beginning.
9309  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9310  if(firstSuballoc.offset == offset)
9311  {
9312  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9313  firstSuballoc.hAllocation = VK_NULL_HANDLE;
9314  m_SumFreeSize += firstSuballoc.size;
9315  ++m_1stNullItemsBeginCount;
9316  CleanupAfterFree();
9317  return;
9318  }
9319  }
9320 
9321  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
9322  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
9323  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9324  {
9325  VmaSuballocation& lastSuballoc = suballocations2nd.back();
9326  if(lastSuballoc.offset == offset)
9327  {
9328  m_SumFreeSize += lastSuballoc.size;
9329  suballocations2nd.pop_back();
9330  CleanupAfterFree();
9331  return;
9332  }
9333  }
9334  // Last allocation in 1st vector.
9335  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
9336  {
9337  VmaSuballocation& lastSuballoc = suballocations1st.back();
9338  if(lastSuballoc.offset == offset)
9339  {
9340  m_SumFreeSize += lastSuballoc.size;
9341  suballocations1st.pop_back();
9342  CleanupAfterFree();
9343  return;
9344  }
9345  }
9346 
9347  // Item from the middle of 1st vector.
9348  {
9349  VmaSuballocation refSuballoc;
9350  refSuballoc.offset = offset;
9351  // The remaining members intentionally stay uninitialized for better performance.
9352  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
9353  suballocations1st.begin() + m_1stNullItemsBeginCount,
9354  suballocations1st.end(),
9355  refSuballoc);
9356  if(it != suballocations1st.end())
9357  {
9358  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9359  it->hAllocation = VK_NULL_HANDLE;
9360  ++m_1stNullItemsMiddleCount;
9361  m_SumFreeSize += it->size;
9362  CleanupAfterFree();
9363  return;
9364  }
9365  }
9366 
9367  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
9368  {
9369  // Item from the middle of 2nd vector.
9370  VmaSuballocation refSuballoc;
9371  refSuballoc.offset = offset;
9372  // The remaining members intentionally stay uninitialized for better performance.
9373  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
9374  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
9375  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
9376  if(it != suballocations2nd.end())
9377  {
9378  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9379  it->hAllocation = VK_NULL_HANDLE;
9380  ++m_2ndNullItemsCount;
9381  m_SumFreeSize += it->size;
9382  CleanupAfterFree();
9383  return;
9384  }
9385  }
9386 
9387  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
9388 }
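// The middle-of-vector lookups above rely on the suballocation vectors being
// sorted by offset (ascending in the 1st vector and in a ring-buffer 2nd,
// descending in a double-stack 2nd). A minimal sketch of such a search with a
// std::lower_bound-style predicate (VmaVectorFindSorted plays this role in the
// library; assumes #include <algorithm>):
//
//   VmaSuballocation ref;
//   ref.offset = offset; // other members are irrelevant to the comparison
//   auto it = std::lower_bound(vec.begin(), vec.end(), ref,
//       [](const VmaSuballocation& lhs, const VmaSuballocation& rhs)
//       { return lhs.offset < rhs.offset; });
//   const bool found = (it != vec.end() && it->offset == offset);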
9389 
9390 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9391 {
9392  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9393  const size_t suballocCount = AccessSuballocations1st().size();
9394  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9395 }
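// Worked example for the threshold above: with 40 suballocations of which 25
// are null, nullItemCount * 2 == 50 and (40 - 25) * 3 == 45, so 50 >= 45 and
// the 1st vector is compacted. In other words, compaction triggers once null
// items outnumber live ones by a ratio of at least 3:2, but only for vectors
// longer than 32 items.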
9396 
9397 void VmaBlockMetadata_Linear::CleanupAfterFree()
9398 {
9399  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9400  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9401 
9402  if(IsEmpty())
9403  {
9404  suballocations1st.clear();
9405  suballocations2nd.clear();
9406  m_1stNullItemsBeginCount = 0;
9407  m_1stNullItemsMiddleCount = 0;
9408  m_2ndNullItemsCount = 0;
9409  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9410  }
9411  else
9412  {
9413  const size_t suballoc1stCount = suballocations1st.size();
9414  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9415  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
9416 
9417  // Find more null items at the beginning of 1st vector.
9418  while(m_1stNullItemsBeginCount < suballoc1stCount &&
9419  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9420  {
9421  ++m_1stNullItemsBeginCount;
9422  --m_1stNullItemsMiddleCount;
9423  }
9424 
9425  // Find more null items at the end of 1st vector.
9426  while(m_1stNullItemsMiddleCount > 0 &&
9427  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
9428  {
9429  --m_1stNullItemsMiddleCount;
9430  suballocations1st.pop_back();
9431  }
9432 
9433  // Find more null items at the end of 2nd vector.
9434  while(m_2ndNullItemsCount > 0 &&
9435  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
9436  {
9437  --m_2ndNullItemsCount;
9438  suballocations2nd.pop_back();
9439  }
9440 
9441  if(ShouldCompact1st())
9442  {
9443  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
9444  size_t srcIndex = m_1stNullItemsBeginCount;
9445  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
9446  {
9447  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
9448  {
9449  ++srcIndex;
9450  }
9451  if(dstIndex != srcIndex)
9452  {
9453  suballocations1st[dstIndex] = suballocations1st[srcIndex];
9454  }
9455  ++srcIndex;
9456  }
9457  suballocations1st.resize(nonNullItemCount);
9458  m_1stNullItemsBeginCount = 0;
9459  m_1stNullItemsMiddleCount = 0;
9460  }
9461 
9462  // 2nd vector became empty.
9463  if(suballocations2nd.empty())
9464  {
9465  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9466  }
9467 
9468  // 1st vector became empty.
9469  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
9470  {
9471  suballocations1st.clear();
9472  m_1stNullItemsBeginCount = 0;
9473 
9474  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9475  {
9476  // Swap 1st with 2nd. Now 2nd is empty.
9477  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9478  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
9479  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
9480  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9481  {
9482  ++m_1stNullItemsBeginCount;
9483  --m_1stNullItemsMiddleCount;
9484  }
9485  m_2ndNullItemsCount = 0;
9486  m_1stVectorIndex ^= 1;
9487  }
9488  }
9489  }
9490 
9491  VMA_HEAVY_ASSERT(Validate());
9492 }
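// Note on the swap above (a sketch of the mechanism; the accessors are defined
// earlier in this file): AccessSuballocations1st()/AccessSuballocations2nd()
// select between two stored vectors via m_1stVectorIndex, so "swap 1st with
// 2nd" is just m_1stVectorIndex ^= 1 plus carrying the null-item counters
// over - no elements are copied.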
9493 
9494 
9495 ////////////////////////////////////////////////////////////////////////////////
9496 // class VmaBlockMetadata_Buddy
9497 
9498 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
9499  VmaBlockMetadata(hAllocator),
9500  m_Root(VMA_NULL),
9501  m_AllocationCount(0),
9502  m_FreeCount(1),
9503  m_SumFreeSize(0)
9504 {
9505  memset(m_FreeList, 0, sizeof(m_FreeList));
9506 }
9507 
9508 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9509 {
9510  DeleteNode(m_Root);
9511 }
9512 
9513 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9514 {
9515  VmaBlockMetadata::Init(size);
9516 
9517  m_UsableSize = VmaPrevPow2(size);
9518  m_SumFreeSize = m_UsableSize;
9519 
9520  // Calculate m_LevelCount.
9521  m_LevelCount = 1;
9522  while(m_LevelCount < MAX_LEVELS &&
9523  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
9524  {
9525  ++m_LevelCount;
9526  }
9527 
9528  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
9529  rootNode->offset = 0;
9530  rootNode->type = Node::TYPE_FREE;
9531  rootNode->parent = VMA_NULL;
9532  rootNode->buddy = VMA_NULL;
9533 
9534  m_Root = rootNode;
9535  AddToFreeListFront(0, rootNode);
9536 }
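// Worked example (sketch): for size = 1000, VmaPrevPow2() gives
// m_UsableSize = 512, leaving a 488-byte unusable tail reported by
// GetUnusableSize(). Assuming MIN_NODE_SIZE = 32 for illustration, the loop
// above keeps adding levels while LevelToNodeSize() stays >= 32:
// 512, 256, 128, 64, 32 -> m_LevelCount = 5.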
9537 
9538 bool VmaBlockMetadata_Buddy::Validate() const
9539 {
9540  // Validate tree.
9541  ValidationContext ctx;
9542  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9543  {
9544  VMA_VALIDATE(false && "ValidateNode failed.");
9545  }
9546  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9547  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9548 
9549  // Validate free node lists.
9550  for(uint32_t level = 0; level < m_LevelCount; ++level)
9551  {
9552  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9553  m_FreeList[level].front->free.prev == VMA_NULL);
9554 
9555  for(Node* node = m_FreeList[level].front;
9556  node != VMA_NULL;
9557  node = node->free.next)
9558  {
9559  VMA_VALIDATE(node->type == Node::TYPE_FREE);
9560 
9561  if(node->free.next == VMA_NULL)
9562  {
9563  VMA_VALIDATE(m_FreeList[level].back == node);
9564  }
9565  else
9566  {
9567  VMA_VALIDATE(node->free.next->free.prev == node);
9568  }
9569  }
9570  }
9571 
9572  // Validate that free lists at higher levels are empty.
9573  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9574  {
9575  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9576  }
9577 
9578  return true;
9579 }
9580 
9581 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9582 {
9583  for(uint32_t level = 0; level < m_LevelCount; ++level)
9584  {
9585  if(m_FreeList[level].front != VMA_NULL)
9586  {
9587  return LevelToNodeSize(level);
9588  }
9589  }
9590  return 0;
9591 }
9592 
9593 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9594 {
9595  const VkDeviceSize unusableSize = GetUnusableSize();
9596 
9597  outInfo.blockCount = 1;
9598 
9599  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9600  outInfo.usedBytes = outInfo.unusedBytes = 0;
9601 
9602  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9603  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9604  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9605 
9606  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
9607 
9608  if(unusableSize > 0)
9609  {
9610  ++outInfo.unusedRangeCount;
9611  outInfo.unusedBytes += unusableSize;
9612  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
9613  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
9614  }
9615 }
9616 
9617 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9618 {
9619  const VkDeviceSize unusableSize = GetUnusableSize();
9620 
9621  inoutStats.size += GetSize();
9622  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9623  inoutStats.allocationCount += m_AllocationCount;
9624  inoutStats.unusedRangeCount += m_FreeCount;
9625  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9626 
9627  if(unusableSize > 0)
9628  {
9629  ++inoutStats.unusedRangeCount;
9630  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9631  }
9632 }
9633 
9634 #if VMA_STATS_STRING_ENABLED
9635 
9636 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9637 {
9638  // TODO optimize
9639  VmaStatInfo stat;
9640  CalcAllocationStatInfo(stat);
9641 
9642  PrintDetailedMap_Begin(
9643  json,
9644  stat.unusedBytes,
9645  stat.allocationCount,
9646  stat.unusedRangeCount);
9647 
9648  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9649 
9650  const VkDeviceSize unusableSize = GetUnusableSize();
9651  if(unusableSize > 0)
9652  {
9653  PrintDetailedMap_UnusedRange(json,
9654  m_UsableSize, // offset
9655  unusableSize); // size
9656  }
9657 
9658  PrintDetailedMap_End(json);
9659 }
9660 
9661 #endif // #if VMA_STATS_STRING_ENABLED
9662 
9663 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9664  uint32_t currentFrameIndex,
9665  uint32_t frameInUseCount,
9666  VkDeviceSize bufferImageGranularity,
9667  VkDeviceSize allocSize,
9668  VkDeviceSize allocAlignment,
9669  bool upperAddress,
9670  VmaSuballocationType allocType,
9671  bool canMakeOtherLost,
9672  uint32_t strategy,
9673  VmaAllocationRequest* pAllocationRequest)
9674 {
9675  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9676 
9677  // Simple way to respect bufferImageGranularity. May be optimized some day.
9678  // Whenever it might be an OPTIMAL image...
9679  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9680  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9681  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9682  {
9683  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9684  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9685  }
9686 
9687  if(allocSize > m_UsableSize)
9688  {
9689  return false;
9690  }
9691 
9692  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9693  for(uint32_t level = targetLevel + 1; level--; )
9694  {
9695  for(Node* freeNode = m_FreeList[level].front;
9696  freeNode != VMA_NULL;
9697  freeNode = freeNode->free.next)
9698  {
9699  if(freeNode->offset % allocAlignment == 0)
9700  {
9701  pAllocationRequest->offset = freeNode->offset;
9702  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9703  pAllocationRequest->sumItemSize = 0;
9704  pAllocationRequest->itemsToMakeLostCount = 0;
9705  pAllocationRequest->customData = (void*)(uintptr_t)level;
9706  return true;
9707  }
9708  }
9709  }
9710 
9711  return false;
9712 }
9713 
9714 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9715  uint32_t currentFrameIndex,
9716  uint32_t frameInUseCount,
9717  VmaAllocationRequest* pAllocationRequest)
9718 {
9719  /*
9720  Lost allocations are not supported in buddy allocator at the moment.
9721  Support might be added in the future.
9722  */
9723  return pAllocationRequest->itemsToMakeLostCount == 0;
9724 }
9725 
9726 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9727 {
9728  /*
9729  Lost allocations are not supported in buddy allocator at the moment.
9730  Support might be added in the future.
9731  */
9732  return 0;
9733 }
9734 
9735 void VmaBlockMetadata_Buddy::Alloc(
9736  const VmaAllocationRequest& request,
9737  VmaSuballocationType type,
9738  VkDeviceSize allocSize,
9739  bool upperAddress,
9740  VmaAllocation hAllocation)
9741 {
9742  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9743  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9744 
9745  Node* currNode = m_FreeList[currLevel].front;
9746  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9747  while(currNode->offset != request.offset)
9748  {
9749  currNode = currNode->free.next;
9750  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9751  }
9752 
9753  // Go down, splitting free nodes.
9754  while(currLevel < targetLevel)
9755  {
9756  // currNode is already first free node at currLevel.
9757  // Remove it from list of free nodes at this currLevel.
9758  RemoveFromFreeList(currLevel, currNode);
9759 
9760  const uint32_t childrenLevel = currLevel + 1;
9761 
9762  // Create two free sub-nodes.
9763  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
9764  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
9765 
9766  leftChild->offset = currNode->offset;
9767  leftChild->type = Node::TYPE_FREE;
9768  leftChild->parent = currNode;
9769  leftChild->buddy = rightChild;
9770 
9771  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9772  rightChild->type = Node::TYPE_FREE;
9773  rightChild->parent = currNode;
9774  rightChild->buddy = leftChild;
9775 
9776  // Convert current currNode to split type.
9777  currNode->type = Node::TYPE_SPLIT;
9778  currNode->split.leftChild = leftChild;
9779 
9780  // Add child nodes to free list. Order is important!
9781  AddToFreeListFront(childrenLevel, rightChild);
9782  AddToFreeListFront(childrenLevel, leftChild);
9783 
9784  ++m_FreeCount;
9785  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
9786  ++currLevel;
9787  currNode = m_FreeList[currLevel].front;
9788 
9789  /*
9790  We can be sure that currNode, as left child of node previously split,
9791  also fulfills the alignment requirement.
9792  */
9793  }
9794 
9795  // Remove from free list.
9796  VMA_ASSERT(currLevel == targetLevel &&
9797  currNode != VMA_NULL &&
9798  currNode->type == Node::TYPE_FREE);
9799  RemoveFromFreeList(currLevel, currNode);
9800 
9801  // Convert to allocation node.
9802  currNode->type = Node::TYPE_ALLOCATION;
9803  currNode->allocation.alloc = hAllocation;
9804 
9805  ++m_AllocationCount;
9806  --m_FreeCount;
9807  m_SumFreeSize -= allocSize;
9808 }
9809 
9810 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9811 {
9812  if(node->type == Node::TYPE_SPLIT)
9813  {
9814  DeleteNode(node->split.leftChild->buddy);
9815  DeleteNode(node->split.leftChild);
9816  }
9817 
9818  vma_delete(GetAllocationCallbacks(), node);
9819 }
9820 
9821 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9822 {
9823  VMA_VALIDATE(level < m_LevelCount);
9824  VMA_VALIDATE(curr->parent == parent);
9825  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9826  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9827  switch(curr->type)
9828  {
9829  case Node::TYPE_FREE:
9830  // curr->free.prev, next are validated separately.
9831  ctx.calculatedSumFreeSize += levelNodeSize;
9832  ++ctx.calculatedFreeCount;
9833  break;
9834  case Node::TYPE_ALLOCATION:
9835  ++ctx.calculatedAllocationCount;
9836  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9837  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
9838  break;
9839  case Node::TYPE_SPLIT:
9840  {
9841  const uint32_t childrenLevel = level + 1;
9842  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9843  const Node* const leftChild = curr->split.leftChild;
9844  VMA_VALIDATE(leftChild != VMA_NULL);
9845  VMA_VALIDATE(leftChild->offset == curr->offset);
9846  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9847  {
9848  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9849  }
9850  const Node* const rightChild = leftChild->buddy;
9851  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9852  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9853  {
9854  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9855  }
9856  }
9857  break;
9858  default:
9859  return false;
9860  }
9861 
9862  return true;
9863 }
9864 
9865 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9866 {
9867  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
9868  uint32_t level = 0;
9869  VkDeviceSize currLevelNodeSize = m_UsableSize;
9870  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9871  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9872  {
9873  ++level;
9874  currLevelNodeSize = nextLevelNodeSize;
9875  nextLevelNodeSize = currLevelNodeSize >> 1;
9876  }
9877  return level;
9878 }
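// Worked example: with m_UsableSize = 256 (and enough levels), an allocSize of
// 48 walks 256 -> 128 -> 64 (level 2) and stops there, because the next node
// size, 32, is smaller than 48. The request is then served from a 64-byte node.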
9879 
9880 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9881 {
9882  // Find node and level.
9883  Node* node = m_Root;
9884  VkDeviceSize nodeOffset = 0;
9885  uint32_t level = 0;
9886  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9887  while(node->type == Node::TYPE_SPLIT)
9888  {
9889  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9890  if(offset < nodeOffset + nextLevelSize)
9891  {
9892  node = node->split.leftChild;
9893  }
9894  else
9895  {
9896  node = node->split.leftChild->buddy;
9897  nodeOffset += nextLevelSize;
9898  }
9899  ++level;
9900  levelNodeSize = nextLevelSize;
9901  }
9902 
9903  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9904  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9905 
9906  ++m_FreeCount;
9907  --m_AllocationCount;
9908  m_SumFreeSize += alloc->GetSize();
9909 
9910  node->type = Node::TYPE_FREE;
9911 
9912  // Join free nodes if possible.
9913  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9914  {
9915  RemoveFromFreeList(level, node->buddy);
9916  Node* const parent = node->parent;
9917 
9918  vma_delete(GetAllocationCallbacks(), node->buddy);
9919  vma_delete(GetAllocationCallbacks(), node);
9920  parent->type = Node::TYPE_FREE;
9921 
9922  node = parent;
9923  --level;
9924  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9925  --m_FreeCount;
9926  }
9927 
9928  AddToFreeListFront(level, node);
9929 }
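// Sketch of the merge loop above: freeing a node whose buddy is also free
// collapses both into their parent, repeatedly.
//
//   level 2:  [A: free][B: just freed]   -> merged into parent
//   level 1:  [parent: free][C: in use]  -> stops; C is parent's buddy
//
// Each step removes the buddy from its free list, deletes both children,
// marks the parent free, and retries one level up until a non-free buddy
// (or the root) is reached; the surviving node is then put on a free list.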
9930 
9931 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9932 {
9933  switch(node->type)
9934  {
9935  case Node::TYPE_FREE:
9936  ++outInfo.unusedRangeCount;
9937  outInfo.unusedBytes += levelNodeSize;
9938  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9939  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
9940  break;
9941  case Node::TYPE_ALLOCATION:
9942  {
9943  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9944  ++outInfo.allocationCount;
9945  outInfo.usedBytes += allocSize;
9946  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
9947  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
9948 
9949  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9950  if(unusedRangeSize > 0)
9951  {
9952  ++outInfo.unusedRangeCount;
9953  outInfo.unusedBytes += unusedRangeSize;
9954  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9955  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9956  }
9957  }
9958  break;
9959  case Node::TYPE_SPLIT:
9960  {
9961  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9962  const Node* const leftChild = node->split.leftChild;
9963  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9964  const Node* const rightChild = leftChild->buddy;
9965  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9966  }
9967  break;
9968  default:
9969  VMA_ASSERT(0);
9970  }
9971 }
9972 
9973 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9974 {
9975  VMA_ASSERT(node->type == Node::TYPE_FREE);
9976 
9977  // List is empty.
9978  Node* const frontNode = m_FreeList[level].front;
9979  if(frontNode == VMA_NULL)
9980  {
9981  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9982  node->free.prev = node->free.next = VMA_NULL;
9983  m_FreeList[level].front = m_FreeList[level].back = node;
9984  }
9985  else
9986  {
9987  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9988  node->free.prev = VMA_NULL;
9989  node->free.next = frontNode;
9990  frontNode->free.prev = node;
9991  m_FreeList[level].front = node;
9992  }
9993 }
9994 
9995 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9996 {
9997  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9998 
9999  // It is at the front.
10000  if(node->free.prev == VMA_NULL)
10001  {
10002  VMA_ASSERT(m_FreeList[level].front == node);
10003  m_FreeList[level].front = node->free.next;
10004  }
10005  else
10006  {
10007  Node* const prevFreeNode = node->free.prev;
10008  VMA_ASSERT(prevFreeNode->free.next == node);
10009  prevFreeNode->free.next = node->free.next;
10010  }
10011 
10012  // It is at the back.
10013  if(node->free.next == VMA_NULL)
10014  {
10015  VMA_ASSERT(m_FreeList[level].back == node);
10016  m_FreeList[level].back = node->free.prev;
10017  }
10018  else
10019  {
10020  Node* const nextFreeNode = node->free.next;
10021  VMA_ASSERT(nextFreeNode->free.prev == node);
10022  nextFreeNode->free.prev = node->free.prev;
10023  }
10024 }
10025 
10026 #if VMA_STATS_STRING_ENABLED
10027 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10028 {
10029  switch(node->type)
10030  {
10031  case Node::TYPE_FREE:
10032  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10033  break;
10034  case Node::TYPE_ALLOCATION:
10035  {
10036  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10037  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10038  if(allocSize < levelNodeSize)
10039  {
10040  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10041  }
10042  }
10043  break;
10044  case Node::TYPE_SPLIT:
10045  {
10046  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10047  const Node* const leftChild = node->split.leftChild;
10048  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10049  const Node* const rightChild = leftChild->buddy;
10050  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10051  }
10052  break;
10053  default:
10054  VMA_ASSERT(0);
10055  }
10056 }
10057 #endif // #if VMA_STATS_STRING_ENABLED
10058 
10059 
10060 ////////////////////////////////////////////////////////////////////////////////
10061 // class VmaDeviceMemoryBlock
10062 
10063 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10064  m_pMetadata(VMA_NULL),
10065  m_MemoryTypeIndex(UINT32_MAX),
10066  m_Id(0),
10067  m_hMemory(VK_NULL_HANDLE),
10068  m_MapCount(0),
10069  m_pMappedData(VMA_NULL)
10070 {
10071 }
10072 
10073 void VmaDeviceMemoryBlock::Init(
10074  VmaAllocator hAllocator,
10075  uint32_t newMemoryTypeIndex,
10076  VkDeviceMemory newMemory,
10077  VkDeviceSize newSize,
10078  uint32_t id,
10079  uint32_t algorithm)
10080 {
10081  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10082 
10083  m_MemoryTypeIndex = newMemoryTypeIndex;
10084  m_Id = id;
10085  m_hMemory = newMemory;
10086 
10087  switch(algorithm)
10088  {
10089  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10090  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10091  break;
10092  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10093  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10094  break;
10095  default:
10096  VMA_ASSERT(0);
10097  // Fall-through.
10098  case 0:
10099  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10100  }
10101  m_pMetadata->Init(newSize);
10102 }
10103 
10104 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10105 {
10106  // This is the most important assert in the entire library.
10107  // Hitting it means you have a memory leak - unreleased VmaAllocation objects.
10108  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10109 
10110  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10111  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10112  m_hMemory = VK_NULL_HANDLE;
10113 
10114  vma_delete(allocator, m_pMetadata);
10115  m_pMetadata = VMA_NULL;
10116 }
10117 
10118 bool VmaDeviceMemoryBlock::Validate() const
10119 {
10120  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
10121  (m_pMetadata->GetSize() != 0));
10122 
10123  return m_pMetadata->Validate();
10124 }
10125 
10126 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
10127 {
10128  void* pData = nullptr;
10129  VkResult res = Map(hAllocator, 1, &pData);
10130  if(res != VK_SUCCESS)
10131  {
10132  return res;
10133  }
10134 
10135  res = m_pMetadata->CheckCorruption(pData);
10136 
10137  Unmap(hAllocator, 1);
10138 
10139  return res;
10140 }
10141 
10142 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10143 {
10144  if(count == 0)
10145  {
10146  return VK_SUCCESS;
10147  }
10148 
10149  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10150  if(m_MapCount != 0)
10151  {
10152  m_MapCount += count;
10153  VMA_ASSERT(m_pMappedData != VMA_NULL);
10154  if(ppData != VMA_NULL)
10155  {
10156  *ppData = m_pMappedData;
10157  }
10158  return VK_SUCCESS;
10159  }
10160  else
10161  {
10162  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10163  hAllocator->m_hDevice,
10164  m_hMemory,
10165  0, // offset
10166  VK_WHOLE_SIZE,
10167  0, // flags
10168  &m_pMappedData);
10169  if(result == VK_SUCCESS)
10170  {
10171  if(ppData != VMA_NULL)
10172  {
10173  *ppData = m_pMappedData;
10174  }
10175  m_MapCount = count;
10176  }
10177  return result;
10178  }
10179 }
10180 
10181 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10182 {
10183  if(count == 0)
10184  {
10185  return;
10186  }
10187 
10188  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10189  if(m_MapCount >= count)
10190  {
10191  m_MapCount -= count;
10192  if(m_MapCount == 0)
10193  {
10194  m_pMappedData = VMA_NULL;
10195  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10196  }
10197  }
10198  else
10199  {
10200  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10201  }
10202 }
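// Editorial note: Map()/Unmap() are reference-counted per block, so nested maps
// of the same VkDeviceMemory collapse into a single vkMapMemory/vkUnmapMemory
// pair. A hedged usage sketch (pBlock and hAllocator are assumed valid):
#if 0
void* p1 = VMA_NULL;
void* p2 = VMA_NULL;
pBlock->Map(hAllocator, 1, &p1);  // vkMapMemory is called, m_MapCount == 1
pBlock->Map(hAllocator, 1, &p2);  // mapping reused, p2 == p1, m_MapCount == 2
pBlock->Unmap(hAllocator, 1);     // m_MapCount == 1, memory stays mapped
pBlock->Unmap(hAllocator, 1);     // m_MapCount == 0, vkUnmapMemory is called
#endif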
10203 
10204 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10205 {
10206  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10207  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10208 
10209  void* pData;
10210  VkResult res = Map(hAllocator, 1, &pData);
10211  if(res != VK_SUCCESS)
10212  {
10213  return res;
10214  }
10215 
10216  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10217  VmaWriteMagicValue(pData, allocOffset + allocSize);
10218 
10219  Unmap(hAllocator, 1);
10220 
10221  return VK_SUCCESS;
10222 }
10223 
10224 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10225 {
10226  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10227  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10228 
10229  void* pData;
10230  VkResult res = Map(hAllocator, 1, &pData);
10231  if(res != VK_SUCCESS)
10232  {
10233  return res;
10234  }
10235 
10236  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10237  {
10238  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10239  }
10240  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10241  {
10242  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10243  }
10244 
10245  Unmap(hAllocator, 1);
10246 
10247  return VK_SUCCESS;
10248 }
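// Editorial note: with VMA_DEBUG_MARGIN > 0 and VMA_DEBUG_DETECT_CORRUPTION
// enabled, a magic value is stamped at allocOffset - VMA_DEBUG_MARGIN (inside
// the margin preceding the allocation) and at allocOffset + allocSize (inside
// the margin following it). WriteMagicValueAroundAllocation() writes both on
// allocation; ValidateMagicValueAroundAllocation() re-checks them on free and
// asserts if either was overwritten - a sign of an out-of-bounds write by the
// application.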
10249 
10250 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10251  const VmaAllocator hAllocator,
10252  const VmaAllocation hAllocation,
10253  VkBuffer hBuffer)
10254 {
10255  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10256  hAllocation->GetBlock() == this);
10257  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10258  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10259  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10260  hAllocator->m_hDevice,
10261  hBuffer,
10262  m_hMemory,
10263  hAllocation->GetOffset());
10264 }
10265 
10266 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10267  const VmaAllocator hAllocator,
10268  const VmaAllocation hAllocation,
10269  VkImage hImage)
10270 {
10271  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10272  hAllocation->GetBlock() == this);
10273  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10274  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10275  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10276  hAllocator->m_hDevice,
10277  hImage,
10278  m_hMemory,
10279  hAllocation->GetOffset());
10280 }
10281 
10282 static void InitStatInfo(VmaStatInfo& outInfo)
10283 {
10284  memset(&outInfo, 0, sizeof(outInfo));
10285  outInfo.allocationSizeMin = UINT64_MAX;
10286  outInfo.unusedRangeSizeMin = UINT64_MAX;
10287 }
10288 
10289 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10290 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10291 {
10292  inoutInfo.blockCount += srcInfo.blockCount;
10293  inoutInfo.allocationCount += srcInfo.allocationCount;
10294  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10295  inoutInfo.usedBytes += srcInfo.usedBytes;
10296  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10297  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10298  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10299  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10300  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10301 }
10302 
10303 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10304 {
10305  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10306  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10307  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10308  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10309 }
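// Editorial note: VmaRoundDiv() divides with rounding to nearest - effectively
// (x + y / 2) / y in integer arithmetic - so the averages computed above do not
// systematically truncate toward zero.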
10310 
10311 VmaPool_T::VmaPool_T(
10312  VmaAllocator hAllocator,
10313  const VmaPoolCreateInfo& createInfo,
10314  VkDeviceSize preferredBlockSize) :
10315  m_BlockVector(
10316  hAllocator,
10317  createInfo.memoryTypeIndex,
10318  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
10319  createInfo.minBlockCount,
10320  createInfo.maxBlockCount,
10321  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
10322  createInfo.frameInUseCount,
10323  true, // isCustomPool
10324  createInfo.blockSize != 0, // explicitBlockSize
10325  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
10326  m_Id(0)
10327 {
10328 }
10329 
10330 VmaPool_T::~VmaPool_T()
10331 {
10332 }
10333 
10334 #if VMA_STATS_STRING_ENABLED
10335 
10336 #endif // #if VMA_STATS_STRING_ENABLED
10337 
10338 VmaBlockVector::VmaBlockVector(
10339  VmaAllocator hAllocator,
10340  uint32_t memoryTypeIndex,
10341  VkDeviceSize preferredBlockSize,
10342  size_t minBlockCount,
10343  size_t maxBlockCount,
10344  VkDeviceSize bufferImageGranularity,
10345  uint32_t frameInUseCount,
10346  bool isCustomPool,
10347  bool explicitBlockSize,
10348  uint32_t algorithm) :
10349  m_hAllocator(hAllocator),
10350  m_MemoryTypeIndex(memoryTypeIndex),
10351  m_PreferredBlockSize(preferredBlockSize),
10352  m_MinBlockCount(minBlockCount),
10353  m_MaxBlockCount(maxBlockCount),
10354  m_BufferImageGranularity(bufferImageGranularity),
10355  m_FrameInUseCount(frameInUseCount),
10356  m_IsCustomPool(isCustomPool),
10357  m_ExplicitBlockSize(explicitBlockSize),
10358  m_Algorithm(algorithm),
10359  m_HasEmptyBlock(false),
10360  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
10361  m_pDefragmentator(VMA_NULL),
10362  m_NextBlockId(0)
10363 {
10364 }
10365 
10366 VmaBlockVector::~VmaBlockVector()
10367 {
10368  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10369 
10370  for(size_t i = m_Blocks.size(); i--; )
10371  {
10372  m_Blocks[i]->Destroy(m_hAllocator);
10373  vma_delete(m_hAllocator, m_Blocks[i]);
10374  }
10375 }
10376 
10377 VkResult VmaBlockVector::CreateMinBlocks()
10378 {
10379  for(size_t i = 0; i < m_MinBlockCount; ++i)
10380  {
10381  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10382  if(res != VK_SUCCESS)
10383  {
10384  return res;
10385  }
10386  }
10387  return VK_SUCCESS;
10388 }
10389 
10390 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10391 {
10392  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10393 
10394  const size_t blockCount = m_Blocks.size();
10395 
10396  pStats->size = 0;
10397  pStats->unusedSize = 0;
10398  pStats->allocationCount = 0;
10399  pStats->unusedRangeCount = 0;
10400  pStats->unusedRangeSizeMax = 0;
10401  pStats->blockCount = blockCount;
10402 
10403  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10404  {
10405  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10406  VMA_ASSERT(pBlock);
10407  VMA_HEAVY_ASSERT(pBlock->Validate());
10408  pBlock->m_pMetadata->AddPoolStats(*pStats);
10409  }
10410 }
10411 
10412 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10413 {
10414  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10415  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10416  (VMA_DEBUG_MARGIN > 0) &&
10417  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10418 }
10419 
10420 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10421 
10422 VkResult VmaBlockVector::Allocate(
10423  VmaPool hCurrentPool,
10424  uint32_t currentFrameIndex,
10425  VkDeviceSize size,
10426  VkDeviceSize alignment,
10427  const VmaAllocationCreateInfo& createInfo,
10428  VmaSuballocationType suballocType,
10429  VmaAllocation* pAllocation)
10430 {
10431  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10432  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10433  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10434  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10435  const bool canCreateNewBlock =
10436  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10437  (m_Blocks.size() < m_MaxBlockCount);
10438  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10439 
10440  // If linearAlgorithm is used, canMakeOtherLost is available only when used as a ring buffer,
10441  // which in turn is available only when maxBlockCount = 1.
10442  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10443  {
10444  canMakeOtherLost = false;
10445  }
10446 
10447  // Upper address can only be used with linear allocator and within single memory block.
10448  if(isUpperAddress &&
10449  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10450  {
10451  return VK_ERROR_FEATURE_NOT_PRESENT;
10452  }
10453 
10454  // Validate strategy.
10455  switch(strategy)
10456  {
10457  case 0:
10458  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
10459  break;
10460  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
10461  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
10462  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
10463  break;
10464  default:
10465  return VK_ERROR_FEATURE_NOT_PRESENT;
10466  }
10467 
10468  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
10469  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10470  {
10471  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10472  }
10473 
10474  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10475 
10476  /*
10477  Under certain conditions, this whole section can be skipped for optimization, so
10478  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10479  e.g. for custom pools using the linear algorithm.
10480  */
10481  if(!canMakeOtherLost || canCreateNewBlock)
10482  {
10483  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10484  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10485  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
10486 
10487  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10488  {
10489  // Use only last block.
10490  if(!m_Blocks.empty())
10491  {
10492  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10493  VMA_ASSERT(pCurrBlock);
10494  VkResult res = AllocateFromBlock(
10495  pCurrBlock,
10496  hCurrentPool,
10497  currentFrameIndex,
10498  size,
10499  alignment,
10500  allocFlagsCopy,
10501  createInfo.pUserData,
10502  suballocType,
10503  strategy,
10504  pAllocation);
10505  if(res == VK_SUCCESS)
10506  {
10507  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10508  return VK_SUCCESS;
10509  }
10510  }
10511  }
10512  else
10513  {
10514  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10515  {
10516  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10517  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10518  {
10519  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10520  VMA_ASSERT(pCurrBlock);
10521  VkResult res = AllocateFromBlock(
10522  pCurrBlock,
10523  hCurrentPool,
10524  currentFrameIndex,
10525  size,
10526  alignment,
10527  allocFlagsCopy,
10528  createInfo.pUserData,
10529  suballocType,
10530  strategy,
10531  pAllocation);
10532  if(res == VK_SUCCESS)
10533  {
10534  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10535  return VK_SUCCESS;
10536  }
10537  }
10538  }
10539  else // WORST_FIT, FIRST_FIT
10540  {
10541  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10542  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10543  {
10544  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10545  VMA_ASSERT(pCurrBlock);
10546  VkResult res = AllocateFromBlock(
10547  pCurrBlock,
10548  hCurrentPool,
10549  currentFrameIndex,
10550  size,
10551  alignment,
10552  allocFlagsCopy,
10553  createInfo.pUserData,
10554  suballocType,
10555  strategy,
10556  pAllocation);
10557  if(res == VK_SUCCESS)
10558  {
10559  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10560  return VK_SUCCESS;
10561  }
10562  }
10563  }
10564  }
10565 
10566  // 2. Try to create new block.
10567  if(canCreateNewBlock)
10568  {
10569  // Calculate optimal size for new block.
10570  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10571  uint32_t newBlockSizeShift = 0;
10572  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10573 
10574  if(!m_ExplicitBlockSize)
10575  {
10576  // Allocate 1/8, 1/4, 1/2 as first blocks.
10577  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10578  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10579  {
10580  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10581  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10582  {
10583  newBlockSize = smallerNewBlockSize;
10584  ++newBlockSizeShift;
10585  }
10586  else
10587  {
10588  break;
10589  }
10590  }
10591  }
10592 
10593  size_t newBlockIndex = 0;
10594  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10595  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10596  if(!m_ExplicitBlockSize)
10597  {
10598  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10599  {
10600  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10601  if(smallerNewBlockSize >= size)
10602  {
10603  newBlockSize = smallerNewBlockSize;
10604  ++newBlockSizeShift;
10605  res = CreateBlock(newBlockSize, &newBlockIndex);
10606  }
10607  else
10608  {
10609  break;
10610  }
10611  }
10612  }
10613 
10614  if(res == VK_SUCCESS)
10615  {
10616  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10617  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10618 
10619  res = AllocateFromBlock(
10620  pBlock,
10621  hCurrentPool,
10622  currentFrameIndex,
10623  size,
10624  alignment,
10625  allocFlagsCopy,
10626  createInfo.pUserData,
10627  suballocType,
10628  strategy,
10629  pAllocation);
10630  if(res == VK_SUCCESS)
10631  {
10632  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10633  return VK_SUCCESS;
10634  }
10635  else
10636  {
10637  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10638  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10639  }
10640  }
10641  }
10642  }
10643 
10644  // 3. Try to allocate from existing blocks with making other allocations lost.
10645  if(canMakeOtherLost)
10646  {
10647  uint32_t tryIndex = 0;
10648  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10649  {
10650  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10651  VmaAllocationRequest bestRequest = {};
10652  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10653 
10654  // 1. Search existing allocations.
10655  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10656  {
10657  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10658  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10659  {
10660  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10661  VMA_ASSERT(pCurrBlock);
10662  VmaAllocationRequest currRequest = {};
10663  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10664  currentFrameIndex,
10665  m_FrameInUseCount,
10666  m_BufferImageGranularity,
10667  size,
10668  alignment,
10669  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10670  suballocType,
10671  canMakeOtherLost,
10672  strategy,
10673  &currRequest))
10674  {
10675  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10676  if(pBestRequestBlock == VMA_NULL ||
10677  currRequestCost < bestRequestCost)
10678  {
10679  pBestRequestBlock = pCurrBlock;
10680  bestRequest = currRequest;
10681  bestRequestCost = currRequestCost;
10682 
10683  if(bestRequestCost == 0)
10684  {
10685  break;
10686  }
10687  }
10688  }
10689  }
10690  }
10691  else // WORST_FIT, FIRST_FIT
10692  {
10693  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10694  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10695  {
10696  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10697  VMA_ASSERT(pCurrBlock);
10698  VmaAllocationRequest currRequest = {};
10699  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10700  currentFrameIndex,
10701  m_FrameInUseCount,
10702  m_BufferImageGranularity,
10703  size,
10704  alignment,
10705  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10706  suballocType,
10707  canMakeOtherLost,
10708  strategy,
10709  &currRequest))
10710  {
10711  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10712  if(pBestRequestBlock == VMA_NULL ||
10713  currRequestCost < bestRequestCost ||
10714  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10715  {
10716  pBestRequestBlock = pCurrBlock;
10717  bestRequest = currRequest;
10718  bestRequestCost = currRequestCost;
10719 
10720  if(bestRequestCost == 0 ||
10721  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10722  {
10723  break;
10724  }
10725  }
10726  }
10727  }
10728  }
10729 
10730  if(pBestRequestBlock != VMA_NULL)
10731  {
10732  if(mapped)
10733  {
10734  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10735  if(res != VK_SUCCESS)
10736  {
10737  return res;
10738  }
10739  }
10740 
10741  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10742  currentFrameIndex,
10743  m_FrameInUseCount,
10744  &bestRequest))
10745  {
10746  // We no longer have an empty block.
10747  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10748  {
10749  m_HasEmptyBlock = false;
10750  }
10751  // Allocate from this pBlock.
10752  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10753  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10754  (*pAllocation)->InitBlockAllocation(
10755  hCurrentPool,
10756  pBestRequestBlock,
10757  bestRequest.offset,
10758  alignment,
10759  size,
10760  suballocType,
10761  mapped,
10762  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10763  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10764  VMA_DEBUG_LOG(" Returned from existing block");
10765  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10766  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10767  {
10768  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10769  }
10770  if(IsCorruptionDetectionEnabled())
10771  {
10772  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10773  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10774  }
10775  return VK_SUCCESS;
10776  }
10777  // else: Some allocations must have been touched while we are here. Next try.
10778  }
10779  else
10780  {
10781  // Could not find place in any of the blocks - break outer loop.
10782  break;
10783  }
10784  }
10785  /* Maximum number of tries exceeded - a very unlikely event that may occur when
10786  many other threads are simultaneously touching allocations, making it impossible
10787  to make them lost at the same time as we try to allocate. */
10788  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10789  {
10790  return VK_ERROR_TOO_MANY_OBJECTS;
10791  }
10792  }
10793 
10794  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10795 }
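// Editorial summary of VmaBlockVector::Allocate() above (informal sketch):
//
//   1. Try existing blocks: only the last block for the linear algorithm;
//      otherwise forward order (least free space first) for BEST_FIT and
//      backward order (most free space first) for WORST_FIT / FIRST_FIT.
//   2. If allowed, create a new block; for non-explicit block sizes start at
//      1/8..1/2 of the preferred size and halve again on allocation failure.
//   3. If CAN_MAKE_OTHER_LOST was requested, retry up to
//      VMA_ALLOCATION_TRY_COUNT times, evicting allocations that can become
//      lost, before giving up with VK_ERROR_OUT_OF_DEVICE_MEMORY.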
10796 
10797 void VmaBlockVector::Free(
10798  VmaAllocation hAllocation)
10799 {
10800  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10801 
10802  // Scope for lock.
10803  {
10804  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10805 
10806  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10807 
10808  if(IsCorruptionDetectionEnabled())
10809  {
10810  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10811  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10812  }
10813 
10814  if(hAllocation->IsPersistentMap())
10815  {
10816  pBlock->Unmap(m_hAllocator, 1);
10817  }
10818 
10819  pBlock->m_pMetadata->Free(hAllocation);
10820  VMA_HEAVY_ASSERT(pBlock->Validate());
10821 
10822  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
10823 
10824  // pBlock became empty after this deallocation.
10825  if(pBlock->m_pMetadata->IsEmpty())
10826  {
10827  // Already have an empty block. We don't want two, so delete this one.
10828  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10829  {
10830  pBlockToDelete = pBlock;
10831  Remove(pBlock);
10832  }
10833  // We now have first empty block.
10834  else
10835  {
10836  m_HasEmptyBlock = true;
10837  }
10838  }
10839  // pBlock didn't become empty, but we have another empty block - find and free that one.
10840  // (This is optional, heuristics.)
10841  else if(m_HasEmptyBlock)
10842  {
10843  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10844  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10845  {
10846  pBlockToDelete = pLastBlock;
10847  m_Blocks.pop_back();
10848  m_HasEmptyBlock = false;
10849  }
10850  }
10851 
10852  IncrementallySortBlocks();
10853  }
10854 
10855  // Destruction of a free block. Deferred until this point, outside of mutex
10856  // lock, for performance reasons.
10857  if(pBlockToDelete != VMA_NULL)
10858  {
10859  VMA_DEBUG_LOG(" Deleted empty allocation");
10860  pBlockToDelete->Destroy(m_hAllocator);
10861  vma_delete(m_hAllocator, pBlockToDelete);
10862  }
10863 }
10864 
10865 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10866 {
10867  VkDeviceSize result = 0;
10868  for(size_t i = m_Blocks.size(); i--; )
10869  {
10870  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10871  if(result >= m_PreferredBlockSize)
10872  {
10873  break;
10874  }
10875  }
10876  return result;
10877 }
10878 
10879 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10880 {
10881  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10882  {
10883  if(m_Blocks[blockIndex] == pBlock)
10884  {
10885  VmaVectorRemove(m_Blocks, blockIndex);
10886  return;
10887  }
10888  }
10889  VMA_ASSERT(0);
10890 }
10891 
10892 void VmaBlockVector::IncrementallySortBlocks()
10893 {
10894  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10895  {
10896  // Bubble sort only until first swap.
10897  for(size_t i = 1; i < m_Blocks.size(); ++i)
10898  {
10899  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10900  {
10901  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10902  return;
10903  }
10904  }
10905  }
10906 }
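// Editorial note: a single adjacent swap per call (one bubble-sort step) is
// enough here - the ordering only degrades gradually as allocations and frees
// change per-block free space, so the vector converges toward ascending order
// by GetSumFreeSize() over time without paying for a full sort on every call.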
10907 
10908 VkResult VmaBlockVector::AllocateFromBlock(
10909  VmaDeviceMemoryBlock* pBlock,
10910  VmaPool hCurrentPool,
10911  uint32_t currentFrameIndex,
10912  VkDeviceSize size,
10913  VkDeviceSize alignment,
10914  VmaAllocationCreateFlags allocFlags,
10915  void* pUserData,
10916  VmaSuballocationType suballocType,
10917  uint32_t strategy,
10918  VmaAllocation* pAllocation)
10919 {
10920  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
10921  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10922  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10923  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10924 
10925  VmaAllocationRequest currRequest = {};
10926  if(pBlock->m_pMetadata->CreateAllocationRequest(
10927  currentFrameIndex,
10928  m_FrameInUseCount,
10929  m_BufferImageGranularity,
10930  size,
10931  alignment,
10932  isUpperAddress,
10933  suballocType,
10934  false, // canMakeOtherLost
10935  strategy,
10936  &currRequest))
10937  {
10938  // Allocate from pCurrBlock.
10939  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
10940 
10941  if(mapped)
10942  {
10943  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
10944  if(res != VK_SUCCESS)
10945  {
10946  return res;
10947  }
10948  }
10949 
10950  // We no longer have an empty block.
10951  if(pBlock->m_pMetadata->IsEmpty())
10952  {
10953  m_HasEmptyBlock = false;
10954  }
10955 
10956  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10957  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
10958  (*pAllocation)->InitBlockAllocation(
10959  hCurrentPool,
10960  pBlock,
10961  currRequest.offset,
10962  alignment,
10963  size,
10964  suballocType,
10965  mapped,
10966  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10967  VMA_HEAVY_ASSERT(pBlock->Validate());
10968  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
10969  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10970  {
10971  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10972  }
10973  if(IsCorruptionDetectionEnabled())
10974  {
10975  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
10976  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10977  }
10978  return VK_SUCCESS;
10979  }
10980  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10981 }
10982 
10983 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10984 {
10985  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10986  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10987  allocInfo.allocationSize = blockSize;
10988  VkDeviceMemory mem = VK_NULL_HANDLE;
10989  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10990  if(res < 0)
10991  {
10992  return res;
10993  }
10994 
10995  // New VkDeviceMemory successfully created.
10996 
10997  // Create a new block object for it.
10998  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10999  pBlock->Init(
11000  m_hAllocator,
11001  m_MemoryTypeIndex,
11002  mem,
11003  allocInfo.allocationSize,
11004  m_NextBlockId++,
11005  m_Algorithm);
11006 
11007  m_Blocks.push_back(pBlock);
11008  if(pNewBlockIndex != VMA_NULL)
11009  {
11010  *pNewBlockIndex = m_Blocks.size() - 1;
11011  }
11012 
11013  return VK_SUCCESS;
11014 }
11015 
11016 #if VMA_STATS_STRING_ENABLED
11017 
11018 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
11019 {
11020  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11021 
11022  json.BeginObject();
11023 
11024  if(m_IsCustomPool)
11025  {
11026  json.WriteString("MemoryTypeIndex");
11027  json.WriteNumber(m_MemoryTypeIndex);
11028 
11029  json.WriteString("BlockSize");
11030  json.WriteNumber(m_PreferredBlockSize);
11031 
11032  json.WriteString("BlockCount");
11033  json.BeginObject(true);
11034  if(m_MinBlockCount > 0)
11035  {
11036  json.WriteString("Min");
11037  json.WriteNumber((uint64_t)m_MinBlockCount);
11038  }
11039  if(m_MaxBlockCount < SIZE_MAX)
11040  {
11041  json.WriteString("Max");
11042  json.WriteNumber((uint64_t)m_MaxBlockCount);
11043  }
11044  json.WriteString("Cur");
11045  json.WriteNumber((uint64_t)m_Blocks.size());
11046  json.EndObject();
11047 
11048  if(m_FrameInUseCount > 0)
11049  {
11050  json.WriteString("FrameInUseCount");
11051  json.WriteNumber(m_FrameInUseCount);
11052  }
11053 
11054  if(m_Algorithm != 0)
11055  {
11056  json.WriteString("Algorithm");
11057  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
11058  }
11059  }
11060  else
11061  {
11062  json.WriteString("PreferredBlockSize");
11063  json.WriteNumber(m_PreferredBlockSize);
11064  }
11065 
11066  json.WriteString("Blocks");
11067  json.BeginObject();
11068  for(size_t i = 0; i < m_Blocks.size(); ++i)
11069  {
11070  json.BeginString();
11071  json.ContinueString(m_Blocks[i]->GetId());
11072  json.EndString();
11073 
11074  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
11075  }
11076  json.EndObject();
11077 
11078  json.EndObject();
11079 }
11080 
11081 #endif // #if VMA_STATS_STRING_ENABLED
11082 
11083 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
11084  VmaAllocator hAllocator,
11085  uint32_t currentFrameIndex)
11086 {
11087  if(m_pDefragmentator == VMA_NULL)
11088  {
11089  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
11090  hAllocator,
11091  this,
11092  currentFrameIndex);
11093  }
11094 
11095  return m_pDefragmentator;
11096 }
11097 
11098 VkResult VmaBlockVector::Defragment(
11099  VmaDefragmentationStats* pDefragmentationStats,
11100  VkDeviceSize& maxBytesToMove,
11101  uint32_t& maxAllocationsToMove)
11102 {
11103  if(m_pDefragmentator == VMA_NULL)
11104  {
11105  return VK_SUCCESS;
11106  }
11107 
11108  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11109 
11110  // Defragment.
11111  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
11112 
11113  // Accumulate statistics.
11114  if(pDefragmentationStats != VMA_NULL)
11115  {
11116  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
11117  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
11118  pDefragmentationStats->bytesMoved += bytesMoved;
11119  pDefragmentationStats->allocationsMoved += allocationsMoved;
11120  VMA_ASSERT(bytesMoved <= maxBytesToMove);
11121  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
11122  maxBytesToMove -= bytesMoved;
11123  maxAllocationsToMove -= allocationsMoved;
11124  }
11125 
11126  // Free empty blocks.
11127  m_HasEmptyBlock = false;
11128  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11129  {
11130  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11131  if(pBlock->m_pMetadata->IsEmpty())
11132  {
11133  if(m_Blocks.size() > m_MinBlockCount)
11134  {
11135  if(pDefragmentationStats != VMA_NULL)
11136  {
11137  ++pDefragmentationStats->deviceMemoryBlocksFreed;
11138  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
11139  }
11140 
11141  VmaVectorRemove(m_Blocks, blockIndex);
11142  pBlock->Destroy(m_hAllocator);
11143  vma_delete(m_hAllocator, pBlock);
11144  }
11145  else
11146  {
11147  m_HasEmptyBlock = true;
11148  }
11149  }
11150  }
11151 
11152  return result;
11153 }
11154 
11155 void VmaBlockVector::DestroyDefragmentator()
11156 {
11157  if(m_pDefragmentator != VMA_NULL)
11158  {
11159  vma_delete(m_hAllocator, m_pDefragmentator);
11160  m_pDefragmentator = VMA_NULL;
11161  }
11162 }
11163 
11164 void VmaBlockVector::MakePoolAllocationsLost(
11165  uint32_t currentFrameIndex,
11166  size_t* pLostAllocationCount)
11167 {
11168  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11169  size_t lostAllocationCount = 0;
11170  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11171  {
11172  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11173  VMA_ASSERT(pBlock);
11174  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
11175  }
11176  if(pLostAllocationCount != VMA_NULL)
11177  {
11178  *pLostAllocationCount = lostAllocationCount;
11179  }
11180 }
11181 
11182 VkResult VmaBlockVector::CheckCorruption()
11183 {
11184  if(!IsCorruptionDetectionEnabled())
11185  {
11186  return VK_ERROR_FEATURE_NOT_PRESENT;
11187  }
11188 
11189  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11190  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11191  {
11192  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11193  VMA_ASSERT(pBlock);
11194  VkResult res = pBlock->CheckCorruption(m_hAllocator);
11195  if(res != VK_SUCCESS)
11196  {
11197  return res;
11198  }
11199  }
11200  return VK_SUCCESS;
11201 }
11202 
11203 void VmaBlockVector::AddStats(VmaStats* pStats)
11204 {
11205  const uint32_t memTypeIndex = m_MemoryTypeIndex;
11206  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11207 
11208  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11209 
11210  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11211  {
11212  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11213  VMA_ASSERT(pBlock);
11214  VMA_HEAVY_ASSERT(pBlock->Validate());
11215  VmaStatInfo allocationStatInfo;
11216  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11217  VmaAddStatInfo(pStats->total, allocationStatInfo);
11218  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11219  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11220  }
11221 }
11222 
11223 ////////////////////////////////////////////////////////////////////////////////
11224 // VmaDefragmentator members definition
11225 
11226 VmaDefragmentator::VmaDefragmentator(
11227  VmaAllocator hAllocator,
11228  VmaBlockVector* pBlockVector,
11229  uint32_t currentFrameIndex) :
11230  m_hAllocator(hAllocator),
11231  m_pBlockVector(pBlockVector),
11232  m_CurrentFrameIndex(currentFrameIndex),
11233  m_BytesMoved(0),
11234  m_AllocationsMoved(0),
11235  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
11236  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
11237 {
11238  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
11239 }
11240 
11241 VmaDefragmentator::~VmaDefragmentator()
11242 {
11243  for(size_t i = m_Blocks.size(); i--; )
11244  {
11245  vma_delete(m_hAllocator, m_Blocks[i]);
11246  }
11247 }
11248 
11249 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11250 {
11251  AllocationInfo allocInfo;
11252  allocInfo.m_hAllocation = hAlloc;
11253  allocInfo.m_pChanged = pChanged;
11254  m_Allocations.push_back(allocInfo);
11255 }
11256 
11257 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11258 {
11259  // It has already been mapped for defragmentation.
11260  if(m_pMappedDataForDefragmentation)
11261  {
11262  *ppMappedData = m_pMappedDataForDefragmentation;
11263  return VK_SUCCESS;
11264  }
11265 
11266  // The block is already mapped for another reason (e.g. persistently mapped allocations).
11267  if(m_pBlock->GetMappedData())
11268  {
11269  *ppMappedData = m_pBlock->GetMappedData();
11270  return VK_SUCCESS;
11271  }
11272 
11273  // Map on first usage.
11274  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11275  *ppMappedData = m_pMappedDataForDefragmentation;
11276  return res;
11277 }
11278 
11279 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11280 {
11281  if(m_pMappedDataForDefragmentation != VMA_NULL)
11282  {
11283  m_pBlock->Unmap(hAllocator, 1);
11284  }
11285 }
11286 
11287 VkResult VmaDefragmentator::DefragmentRound(
11288  VkDeviceSize maxBytesToMove,
11289  uint32_t maxAllocationsToMove)
11290 {
11291  if(m_Blocks.empty())
11292  {
11293  return VK_SUCCESS;
11294  }
11295 
11296  size_t srcBlockIndex = m_Blocks.size() - 1;
11297  size_t srcAllocIndex = SIZE_MAX;
11298  for(;;)
11299  {
11300  // 1. Find next allocation to move.
11301  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11302  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11303  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11304  {
11305  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11306  {
11307  // Finished: no more allocations to process.
11308  if(srcBlockIndex == 0)
11309  {
11310  return VK_SUCCESS;
11311  }
11312  else
11313  {
11314  --srcBlockIndex;
11315  srcAllocIndex = SIZE_MAX;
11316  }
11317  }
11318  else
11319  {
11320  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11321  }
11322  }
11323 
11324  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11325  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11326 
11327  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11328  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11329  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11330  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11331 
11332  // 2. Try to find new place for this allocation in preceding or current block.
11333  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11334  {
11335  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11336  VmaAllocationRequest dstAllocRequest;
11337  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11338  m_CurrentFrameIndex,
11339  m_pBlockVector->GetFrameInUseCount(),
11340  m_pBlockVector->GetBufferImageGranularity(),
11341  size,
11342  alignment,
11343  false, // upperAddress
11344  suballocType,
11345  false, // canMakeOtherLost
11346  VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET, // strategy
11347  &dstAllocRequest) &&
11348  MoveMakesSense(
11349  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11350  {
11351  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11352 
11353  // Reached limit on number of allocations or bytes to move.
11354  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11355  (m_BytesMoved + size > maxBytesToMove))
11356  {
11357  return VK_INCOMPLETE;
11358  }
11359 
11360  void* pDstMappedData = VMA_NULL;
11361  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11362  if(res != VK_SUCCESS)
11363  {
11364  return res;
11365  }
11366 
11367  void* pSrcMappedData = VMA_NULL;
11368  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11369  if(res != VK_SUCCESS)
11370  {
11371  return res;
11372  }
11373 
11374  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11375  memcpy(
11376  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11377  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11378  static_cast<size_t>(size));
11379 
11380  if(VMA_DEBUG_MARGIN > 0)
11381  {
11382  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11383  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11384  }
11385 
11386  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11387  dstAllocRequest,
11388  suballocType,
11389  size,
11390  false, // upperAddress
11391  allocInfo.m_hAllocation);
11392  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11393 
11394  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11395 
11396  if(allocInfo.m_pChanged != VMA_NULL)
11397  {
11398  *allocInfo.m_pChanged = VK_TRUE;
11399  }
11400 
11401  ++m_AllocationsMoved;
11402  m_BytesMoved += size;
11403 
11404  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11405 
11406  break;
11407  }
11408  }
11409 
11410  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
11411 
11412  if(srcAllocIndex > 0)
11413  {
11414  --srcAllocIndex;
11415  }
11416  else
11417  {
11418  if(srcBlockIndex > 0)
11419  {
11420  --srcBlockIndex;
11421  srcAllocIndex = SIZE_MAX;
11422  }
11423  else
11424  {
11425  return VK_SUCCESS;
11426  }
11427  }
11428  }
11429 }
11430 
11431 VkResult VmaDefragmentator::Defragment(
11432  VkDeviceSize maxBytesToMove,
11433  uint32_t maxAllocationsToMove)
11434 {
11435  if(m_Allocations.empty())
11436  {
11437  return VK_SUCCESS;
11438  }
11439 
11440  // Create block info for each block.
11441  const size_t blockCount = m_pBlockVector->m_Blocks.size();
11442  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11443  {
11444  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
11445  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
11446  m_Blocks.push_back(pBlockInfo);
11447  }
11448 
11449  // Sort them by m_pBlock pointer value.
11450  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
11451 
11452  // Move allocation infos from m_Allocations to the appropriate m_Blocks[i].m_Allocations.
11453  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
11454  {
11455  AllocationInfo& allocInfo = m_Allocations[allocIndex];
11456  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
11457  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11458  {
11459  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
11460  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
11461  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
11462  {
11463  (*it)->m_Allocations.push_back(allocInfo);
11464  }
11465  else
11466  {
11467  VMA_ASSERT(0);
11468  }
11469  }
11470  }
11471  m_Allocations.clear();
11472 
11473  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11474  {
11475  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
11476  pBlockInfo->CalcHasNonMovableAllocations();
11477  pBlockInfo->SortAllocationsBySizeDescecnding();
11478  }
11479 
11480  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
11481  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
11482 
11483  // Execute defragmentation rounds (the main part).
11484  VkResult result = VK_SUCCESS;
11485  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
11486  {
11487  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
11488  }
11489 
11490  // Unmap blocks that were mapped for defragmentation.
11491  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11492  {
11493  m_Blocks[blockIndex]->Unmap(m_hAllocator);
11494  }
11495 
11496  return result;
11497 }
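// Editorial summary of Defragment() above (informal sketch): build one
// BlockInfo per block, bucket the registered allocations into their owning
// blocks (skipping lost ones), sort blocks from most "destination" to most
// "source", run up to two DefragmentRound() passes bounded by maxBytesToMove /
// maxAllocationsToMove, and finally unmap any blocks that were mapped only for
// defragmentation.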
11498 
11499 bool VmaDefragmentator::MoveMakesSense(
11500  size_t dstBlockIndex, VkDeviceSize dstOffset,
11501  size_t srcBlockIndex, VkDeviceSize srcOffset)
11502 {
11503  if(dstBlockIndex < srcBlockIndex)
11504  {
11505  return true;
11506  }
11507  if(dstBlockIndex > srcBlockIndex)
11508  {
11509  return false;
11510  }
11511  if(dstOffset < srcOffset)
11512  {
11513  return true;
11514  }
11515  return false;
11516 }
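// Editorial note: MoveMakesSense() is simply a lexicographic "less than" on the
// pair (blockIndex, offset) - a move is worthwhile only if it lands strictly
// earlier in that ordering. An equivalent one-liner (assuming <utility> is
// available):
#if 0
return std::make_pair(dstBlockIndex, dstOffset) < std::make_pair(srcBlockIndex, srcOffset);
#endif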
11517 
11518 ////////////////////////////////////////////////////////////////////////////////
11519 // VmaRecorder
11520 
11521 #if VMA_RECORDING_ENABLED
11522 
11523 VmaRecorder::VmaRecorder() :
11524  m_UseMutex(true),
11525  m_Flags(0),
11526  m_File(VMA_NULL),
11527  m_Freq(INT64_MAX),
11528  m_StartCounter(INT64_MAX)
11529 {
11530 }
11531 
11532 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11533 {
11534  m_UseMutex = useMutex;
11535  m_Flags = settings.flags;
11536 
11537  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11538  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11539 
11540  // Open file for writing.
11541  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11542  if(err != 0)
11543  {
11544  return VK_ERROR_INITIALIZATION_FAILED;
11545  }
11546 
11547  // Write header.
11548  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11549  fprintf(m_File, "%s\n", "1,4");
11550 
11551  return VK_SUCCESS;
11552 }
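// Editorial note: the recording is a plain CSV stream. After the two header
// lines written above (the file signature and the format version "1,4"), every
// recorded call becomes one line of the form:
//
//   threadId,time,frameIndex,functionName[,args...]
//
// For example, a hypothetical first entry (values invented for illustration):
//
//   12552,0.002,0,vmaCreateAllocator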
11553 
11554 VmaRecorder::~VmaRecorder()
11555 {
11556  if(m_File != VMA_NULL)
11557  {
11558  fclose(m_File);
11559  }
11560 }
11561 
11562 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11563 {
11564  CallParams callParams;
11565  GetBasicParams(callParams);
11566 
11567  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11568  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11569  Flush();
11570 }
11571 
11572 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11573 {
11574  CallParams callParams;
11575  GetBasicParams(callParams);
11576 
11577  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11578  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11579  Flush();
11580 }
11581 
11582 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
11583 {
11584  CallParams callParams;
11585  GetBasicParams(callParams);
11586 
11587  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11588  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
11589  createInfo.memoryTypeIndex,
11590  createInfo.flags,
11591  createInfo.blockSize,
11592  (uint64_t)createInfo.minBlockCount,
11593  (uint64_t)createInfo.maxBlockCount,
11594  createInfo.frameInUseCount,
11595  pool);
11596  Flush();
11597 }
11598 
11599 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11600 {
11601  CallParams callParams;
11602  GetBasicParams(callParams);
11603 
11604  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11605  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11606  pool);
11607  Flush();
11608 }
11609 
11610 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11611  const VkMemoryRequirements& vkMemReq,
11612  const VmaAllocationCreateInfo& createInfo,
11613  VmaAllocation allocation)
11614 {
11615  CallParams callParams;
11616  GetBasicParams(callParams);
11617 
11618  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11619  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11620  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11621  vkMemReq.size,
11622  vkMemReq.alignment,
11623  vkMemReq.memoryTypeBits,
11624  createInfo.flags,
11625  createInfo.usage,
11626  createInfo.requiredFlags,
11627  createInfo.preferredFlags,
11628  createInfo.memoryTypeBits,
11629  createInfo.pool,
11630  allocation,
11631  userDataStr.GetString());
11632  Flush();
11633 }
11634 
11635 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11636  const VkMemoryRequirements& vkMemReq,
11637  bool requiresDedicatedAllocation,
11638  bool prefersDedicatedAllocation,
11639  const VmaAllocationCreateInfo& createInfo,
11640  VmaAllocation allocation)
11641 {
11642  CallParams callParams;
11643  GetBasicParams(callParams);
11644 
11645  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11646  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11647  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11648  vkMemReq.size,
11649  vkMemReq.alignment,
11650  vkMemReq.memoryTypeBits,
11651  requiresDedicatedAllocation ? 1 : 0,
11652  prefersDedicatedAllocation ? 1 : 0,
11653  createInfo.flags,
11654  createInfo.usage,
11655  createInfo.requiredFlags,
11656  createInfo.preferredFlags,
11657  createInfo.memoryTypeBits,
11658  createInfo.pool,
11659  allocation,
11660  userDataStr.GetString());
11661  Flush();
11662 }
11663 
11664 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11665  const VkMemoryRequirements& vkMemReq,
11666  bool requiresDedicatedAllocation,
11667  bool prefersDedicatedAllocation,
11668  const VmaAllocationCreateInfo& createInfo,
11669  VmaAllocation allocation)
11670 {
11671  CallParams callParams;
11672  GetBasicParams(callParams);
11673 
11674  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11675  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11676  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11677  vkMemReq.size,
11678  vkMemReq.alignment,
11679  vkMemReq.memoryTypeBits,
11680  requiresDedicatedAllocation ? 1 : 0,
11681  prefersDedicatedAllocation ? 1 : 0,
11682  createInfo.flags,
11683  createInfo.usage,
11684  createInfo.requiredFlags,
11685  createInfo.preferredFlags,
11686  createInfo.memoryTypeBits,
11687  createInfo.pool,
11688  allocation,
11689  userDataStr.GetString());
11690  Flush();
11691 }
11692 
11693 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11694  VmaAllocation allocation)
11695 {
11696  CallParams callParams;
11697  GetBasicParams(callParams);
11698 
11699  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11700  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11701  allocation);
11702  Flush();
11703 }
11704 
11705 void VmaRecorder::RecordResizeAllocation(
11706  uint32_t frameIndex,
11707  VmaAllocation allocation,
11708  VkDeviceSize newSize)
11709 {
11710  CallParams callParams;
11711  GetBasicParams(callParams);
11712 
11713  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11714  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
11715  allocation, newSize);
11716  Flush();
11717 }
11718 
11719 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11720  VmaAllocation allocation,
11721  const void* pUserData)
11722 {
11723  CallParams callParams;
11724  GetBasicParams(callParams);
11725 
11726  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11727  UserDataString userDataStr(
11728  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11729  pUserData);
11730  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11731  allocation,
11732  userDataStr.GetString());
11733  Flush();
11734 }
11735 
11736 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11737  VmaAllocation allocation)
11738 {
11739  CallParams callParams;
11740  GetBasicParams(callParams);
11741 
11742  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11743  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11744  allocation);
11745  Flush();
11746 }
11747 
11748 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11749  VmaAllocation allocation)
11750 {
11751  CallParams callParams;
11752  GetBasicParams(callParams);
11753 
11754  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11755  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11756  allocation);
11757  Flush();
11758 }
11759 
11760 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11761  VmaAllocation allocation)
11762 {
11763  CallParams callParams;
11764  GetBasicParams(callParams);
11765 
11766  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11767  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11768  allocation);
11769  Flush();
11770 }
11771 
11772 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11773  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11774 {
11775  CallParams callParams;
11776  GetBasicParams(callParams);
11777 
11778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11779  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11780  allocation,
11781  offset,
11782  size);
11783  Flush();
11784 }
11785 
11786 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11787  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11788 {
11789  CallParams callParams;
11790  GetBasicParams(callParams);
11791 
11792  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11793  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11794  allocation,
11795  offset,
11796  size);
11797  Flush();
11798 }
11799 
11800 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11801  const VkBufferCreateInfo& bufCreateInfo,
11802  const VmaAllocationCreateInfo& allocCreateInfo,
11803  VmaAllocation allocation)
11804 {
11805  CallParams callParams;
11806  GetBasicParams(callParams);
11807 
11808  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11809  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11810  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11811  bufCreateInfo.flags,
11812  bufCreateInfo.size,
11813  bufCreateInfo.usage,
11814  bufCreateInfo.sharingMode,
11815  allocCreateInfo.flags,
11816  allocCreateInfo.usage,
11817  allocCreateInfo.requiredFlags,
11818  allocCreateInfo.preferredFlags,
11819  allocCreateInfo.memoryTypeBits,
11820  allocCreateInfo.pool,
11821  allocation,
11822  userDataStr.GetString());
11823  Flush();
11824 }
11825 
11826 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11827  const VkImageCreateInfo& imageCreateInfo,
11828  const VmaAllocationCreateInfo& allocCreateInfo,
11829  VmaAllocation allocation)
11830 {
11831  CallParams callParams;
11832  GetBasicParams(callParams);
11833 
11834  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11835  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11836  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11837  imageCreateInfo.flags,
11838  imageCreateInfo.imageType,
11839  imageCreateInfo.format,
11840  imageCreateInfo.extent.width,
11841  imageCreateInfo.extent.height,
11842  imageCreateInfo.extent.depth,
11843  imageCreateInfo.mipLevels,
11844  imageCreateInfo.arrayLayers,
11845  imageCreateInfo.samples,
11846  imageCreateInfo.tiling,
11847  imageCreateInfo.usage,
11848  imageCreateInfo.sharingMode,
11849  imageCreateInfo.initialLayout,
11850  allocCreateInfo.flags,
11851  allocCreateInfo.usage,
11852  allocCreateInfo.requiredFlags,
11853  allocCreateInfo.preferredFlags,
11854  allocCreateInfo.memoryTypeBits,
11855  allocCreateInfo.pool,
11856  allocation,
11857  userDataStr.GetString());
11858  Flush();
11859 }
11860 
11861 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11862  VmaAllocation allocation)
11863 {
11864  CallParams callParams;
11865  GetBasicParams(callParams);
11866 
11867  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11868  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11869  allocation);
11870  Flush();
11871 }
11872 
11873 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11874  VmaAllocation allocation)
11875 {
11876  CallParams callParams;
11877  GetBasicParams(callParams);
11878 
11879  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11880  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11881  allocation);
11882  Flush();
11883 }
11884 
11885 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11886  VmaAllocation allocation)
11887 {
11888  CallParams callParams;
11889  GetBasicParams(callParams);
11890 
11891  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11892  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11893  allocation);
11894  Flush();
11895 }
11896 
11897 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11898  VmaAllocation allocation)
11899 {
11900  CallParams callParams;
11901  GetBasicParams(callParams);
11902 
11903  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11904  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11905  allocation);
11906  Flush();
11907 }
11908 
11909 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11910  VmaPool pool)
11911 {
11912  CallParams callParams;
11913  GetBasicParams(callParams);
11914 
11915  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11916  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11917  pool);
11918  Flush();
11919 }
11920 
11921 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11922 {
11923  if(pUserData != VMA_NULL)
11924  {
11925  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11926  {
11927  m_Str = (const char*)pUserData;
11928  }
11929  else
11930  {
11931  sprintf_s(m_PtrStr, "%p", pUserData);
11932  m_Str = m_PtrStr;
11933  }
11934  }
11935  else
11936  {
11937  m_Str = "";
11938  }
11939 }
11940 
11941 void VmaRecorder::WriteConfiguration(
11942  const VkPhysicalDeviceProperties& devProps,
11943  const VkPhysicalDeviceMemoryProperties& memProps,
11944  bool dedicatedAllocationExtensionEnabled)
11945 {
11946  fprintf(m_File, "Config,Begin\n");
11947 
11948  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
11949  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
11950  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
11951  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
11952  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
11953  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
11954 
11955  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
11956  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
11957  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
11958 
11959  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
11960  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
11961  {
11962  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
11963  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
11964  }
11965  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
11966  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
11967  {
11968  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
11969  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
11970  }
11971 
11972  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
11973 
11974  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
11975  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
11976  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
11977  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
11978  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
11979  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
11980  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
11981  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
11982  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11983 
11984  fprintf(m_File, "Config,End\n");
11985 }
11986 
11987 void VmaRecorder::GetBasicParams(CallParams& outParams)
11988 {
11989  outParams.threadId = GetCurrentThreadId();
11990 
11991  LARGE_INTEGER counter;
11992  QueryPerformanceCounter(&counter);
11993  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11994 }
11995 
11996 void VmaRecorder::Flush()
11997 {
11998  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11999  {
12000  fflush(m_File);
12001  }
12002 }
12003 
12004 #endif // #if VMA_RECORDING_ENABLED
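
/*
The recorder above is compiled only when VMA_RECORDING_ENABLED is 1 (by default
only on Windows, since the implementation relies on GetCurrentThreadId() and
QueryPerformanceCounter()). A minimal sketch of enabling it from user code,
assuming physicalDevice and device already exist; the file path is illustrative:

\code
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // fflush() after every call - see VmaRecorder::Flush() above.
recordSettings.pFilePath = "vma_record.csv"; // Hypothetical path - any writable location works.

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pRecordSettings = &recordSettings;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// Fails with VK_ERROR_FEATURE_NOT_PRESENT when recording support is compiled
// out - see VmaAllocator_T::Init() below.
\endcode
*/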
12005 
12006 ////////////////////////////////////////////////////////////////////////////////
12007 // VmaAllocator_T
12008 
12009 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
12010  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
12011  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
12012  m_hDevice(pCreateInfo->device),
12013  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
12014  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
12015  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
12016  m_PreferredLargeHeapBlockSize(0),
12017  m_PhysicalDevice(pCreateInfo->physicalDevice),
12018  m_CurrentFrameIndex(0),
12019  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
12020  m_NextPoolId(0)
12021 #if VMA_RECORDING_ENABLED
12022  ,m_pRecorder(VMA_NULL)
12023 #endif
12024 {
12025  if(VMA_DEBUG_DETECT_CORRUPTION)
12026  {
12027  // Needs to be multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
12028  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
12029  }
12030 
12031  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
12032 
12033 #if !(VMA_DEDICATED_ALLOCATION)
12034  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
12035  {
12036  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
12037  }
12038 #endif
12039 
12040  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
12041  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
12042  memset(&m_MemProps, 0, sizeof(m_MemProps));
12043 
12044  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
12045  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
12046 
12047  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12048  {
12049  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
12050  }
12051 
12052  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
12053  {
12054  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
12055  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
12056  }
12057 
12058  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
12059 
12060  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
12061  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
12062 
12063  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
12064  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
12065  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
12066  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
12067 
12068  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
12069  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
12070 
12071  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
12072  {
12073  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
12074  {
12075  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
12076  if(limit != VK_WHOLE_SIZE)
12077  {
12078  m_HeapSizeLimit[heapIndex] = limit;
12079  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
12080  {
12081  m_MemProps.memoryHeaps[heapIndex].size = limit;
12082  }
12083  }
12084  }
12085  }
12086 
12087  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12088  {
12089  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
12090 
12091  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
12092  this,
12093  memTypeIndex,
12094  preferredBlockSize,
12095  0,
12096  SIZE_MAX,
12097  GetBufferImageGranularity(),
12098  pCreateInfo->frameInUseCount,
12099  false, // isCustomPool
12100  false, // explicitBlockSize
12101  false); // linearAlgorithm
12102  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
12103  // because minBlockCount is 0.
12104  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
12105 
12106  }
12107 }
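
/*
A sketch of how VmaAllocatorCreateInfo::pHeapSizeLimit feeds the loop above,
assuming physicalDevice and device already exist; note that a limit smaller
than the real heap size also shrinks the heap size reported in m_MemProps:

\code
VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
{
    heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means no limit on that heap.
}
heapLimits[0] = 256ull * 1024 * 1024; // Illustrative: cap heap 0 at 256 MiB.

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pHeapSizeLimit = heapLimits;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
\endcode
*/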
12108 
12109 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
12110 {
12111  VkResult res = VK_SUCCESS;
12112 
12113  if(pCreateInfo->pRecordSettings != VMA_NULL &&
12114  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
12115  {
12116 #if VMA_RECORDING_ENABLED
12117  m_pRecorder = vma_new(this, VmaRecorder)();
12118  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
12119  if(res != VK_SUCCESS)
12120  {
12121  return res;
12122  }
12123  m_pRecorder->WriteConfiguration(
12124  m_PhysicalDeviceProperties,
12125  m_MemProps,
12126  m_UseKhrDedicatedAllocation);
12127  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
12128 #else
12129  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
12130  return VK_ERROR_FEATURE_NOT_PRESENT;
12131 #endif
12132  }
12133 
12134  return res;
12135 }
12136 
12137 VmaAllocator_T::~VmaAllocator_T()
12138 {
12139 #if VMA_RECORDING_ENABLED
12140  if(m_pRecorder != VMA_NULL)
12141  {
12142  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
12143  vma_delete(this, m_pRecorder);
12144  }
12145 #endif
12146 
12147  VMA_ASSERT(m_Pools.empty());
12148 
12149  for(size_t i = GetMemoryTypeCount(); i--; )
12150  {
12151  vma_delete(this, m_pDedicatedAllocations[i]);
12152  vma_delete(this, m_pBlockVectors[i]);
12153  }
12154 }
12155 
12156 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
12157 {
12158 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12159  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
12160  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
12161  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
12162  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
12163  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
12164  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
12165  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
12166  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
12167  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
12168  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
12169  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
12170  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
12171  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
12172  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
12173  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
12174  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
12175 #if VMA_DEDICATED_ALLOCATION
12176  if(m_UseKhrDedicatedAllocation)
12177  {
12178  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
12179  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
12180  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
12181  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
12182  }
12183 #endif // #if VMA_DEDICATED_ALLOCATION
12184 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12185 
12186 #define VMA_COPY_IF_NOT_NULL(funcName) \
12187  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
12188 
12189  if(pVulkanFunctions != VMA_NULL)
12190  {
12191  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
12192  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
12193  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
12194  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
12195  VMA_COPY_IF_NOT_NULL(vkMapMemory);
12196  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
12197  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
12198  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
12199  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
12200  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
12201  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
12202  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
12203  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
12204  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
12205  VMA_COPY_IF_NOT_NULL(vkCreateImage);
12206  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
12207 #if VMA_DEDICATED_ALLOCATION
12208  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
12209  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
12210 #endif
12211  }
12212 
12213 #undef VMA_COPY_IF_NOT_NULL
12214 
12215  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
12216  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
12217  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
12218  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
12219  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
12220  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
12221  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
12222  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
12223  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
12224  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
12225  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
12226  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
12227  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
12228  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
12229  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
12230  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
12231  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
12232  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
12233 #if VMA_DEDICATED_ALLOCATION
12234  if(m_UseKhrDedicatedAllocation)
12235  {
12236  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
12237  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
12238  }
12239 #endif
12240 }
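
/*
When VMA_STATIC_VULKAN_FUNCTIONS is defined to 0 (e.g. entry points fetched at
runtime through a meta-loader such as volk), every pointer asserted above must
arrive via VmaAllocatorCreateInfo::pVulkanFunctions. A partial sketch, assuming
the global function pointers have already been loaded:

\code
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
vulkanFunctions.vkFreeMemory = vkFreeMemory;
// ...and so on for every member, including vkGetBufferMemoryRequirements2KHR /
// vkGetImageMemoryRequirements2KHR when the dedicated-allocation extension is enabled.

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
\endcode
*/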
12241 
12242 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12243 {
12244  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12245  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12246  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12247  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12248 }
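
/*
Worked example, assuming the default VMA_SMALL_HEAP_MAX_SIZE of 1 GiB and
VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE of 256 MiB: a 256 MiB heap counts as small,
so its preferred block size is 256 MiB / 8 = 32 MiB; an 8 GiB heap is large,
so it gets m_PreferredLargeHeapBlockSize - 256 MiB unless overridden via
VmaAllocatorCreateInfo::preferredLargeHeapBlockSize.
*/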
12249 
12250 VkResult VmaAllocator_T::AllocateMemoryOfType(
12251  VkDeviceSize size,
12252  VkDeviceSize alignment,
12253  bool dedicatedAllocation,
12254  VkBuffer dedicatedBuffer,
12255  VkImage dedicatedImage,
12256  const VmaAllocationCreateInfo& createInfo,
12257  uint32_t memTypeIndex,
12258  VmaSuballocationType suballocType,
12259  VmaAllocation* pAllocation)
12260 {
12261  VMA_ASSERT(pAllocation != VMA_NULL);
12262  VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
12263 
12264  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12265 
12266  // If memory type is not HOST_VISIBLE, disable MAPPED.
12267  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12268  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12269  {
12270  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12271  }
12272 
12273  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12274  VMA_ASSERT(blockVector);
12275 
12276  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12277  bool preferDedicatedMemory =
12278  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12279  dedicatedAllocation ||
12280  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
12281  size > preferredBlockSize / 2;
12282 
12283  if(preferDedicatedMemory &&
12284  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12285  finalCreateInfo.pool == VK_NULL_HANDLE)
12286  {
12287  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
12288  }
12289 
12290  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12291  {
12292  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12293  {
12294  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12295  }
12296  else
12297  {
12298  return AllocateDedicatedMemory(
12299  size,
12300  suballocType,
12301  memTypeIndex,
12302  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12303  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12304  finalCreateInfo.pUserData,
12305  dedicatedBuffer,
12306  dedicatedImage,
12307  pAllocation);
12308  }
12309  }
12310  else
12311  {
12312  VkResult res = blockVector->Allocate(
12313  VK_NULL_HANDLE, // hCurrentPool
12314  m_CurrentFrameIndex.load(),
12315  size,
12316  alignment,
12317  finalCreateInfo,
12318  suballocType,
12319  pAllocation);
12320  if(res == VK_SUCCESS)
12321  {
12322  return res;
12323  }
12324 
12325  // Try dedicated memory.
12326  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12327  {
12328  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12329  }
12330  else
12331  {
12332  res = AllocateDedicatedMemory(
12333  size,
12334  suballocType,
12335  memTypeIndex,
12336  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12337  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12338  finalCreateInfo.pUserData,
12339  dedicatedBuffer,
12340  dedicatedImage,
12341  pAllocation);
12342  if(res == VK_SUCCESS)
12343  {
12344  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
12345  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12346  return VK_SUCCESS;
12347  }
12348  else
12349  {
12350  // Everything failed: Return error code.
12351  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12352  return res;
12353  }
12354  }
12355  }
12356 }
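
/*
Seen from the public API, the heuristic above means any allocation larger than
half the preferred block size quietly becomes a dedicated one. A sketch of
steering that explicitly, assuming an existing allocator; sizes are illustrative:

\code
VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufInfo.size = 64ull * 1024 * 1024;
bufInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; // Force its own VkDeviceMemory.
// Or the opposite - never call vkAllocateMemory, fail if no block has room:
// allocInfo.flags = VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocInfo, &buffer, &allocation, nullptr);
\endcode
*/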
12357 
12358 VkResult VmaAllocator_T::AllocateDedicatedMemory(
12359  VkDeviceSize size,
12360  VmaSuballocationType suballocType,
12361  uint32_t memTypeIndex,
12362  bool map,
12363  bool isUserDataString,
12364  void* pUserData,
12365  VkBuffer dedicatedBuffer,
12366  VkImage dedicatedImage,
12367  VmaAllocation* pAllocation)
12368 {
12369  VMA_ASSERT(pAllocation);
12370 
12371  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12372  allocInfo.memoryTypeIndex = memTypeIndex;
12373  allocInfo.allocationSize = size;
12374 
12375 #if VMA_DEDICATED_ALLOCATION
12376  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
12377  if(m_UseKhrDedicatedAllocation)
12378  {
12379  if(dedicatedBuffer != VK_NULL_HANDLE)
12380  {
12381  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
12382  dedicatedAllocInfo.buffer = dedicatedBuffer;
12383  allocInfo.pNext = &dedicatedAllocInfo;
12384  }
12385  else if(dedicatedImage != VK_NULL_HANDLE)
12386  {
12387  dedicatedAllocInfo.image = dedicatedImage;
12388  allocInfo.pNext = &dedicatedAllocInfo;
12389  }
12390  }
12391 #endif // #if VMA_DEDICATED_ALLOCATION
12392 
12393  // Allocate VkDeviceMemory.
12394  VkDeviceMemory hMemory = VK_NULL_HANDLE;
12395  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
12396  if(res < 0)
12397  {
12398  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12399  return res;
12400  }
12401 
12402  void* pMappedData = VMA_NULL;
12403  if(map)
12404  {
12405  res = (*m_VulkanFunctions.vkMapMemory)(
12406  m_hDevice,
12407  hMemory,
12408  0,
12409  VK_WHOLE_SIZE,
12410  0,
12411  &pMappedData);
12412  if(res < 0)
12413  {
12414  VMA_DEBUG_LOG(" vkMapMemory FAILED");
12415  FreeVulkanMemory(memTypeIndex, size, hMemory);
12416  return res;
12417  }
12418  }
12419 
12420  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
12421  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
12422  (*pAllocation)->SetUserData(this, pUserData);
12423  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12424  {
12425  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12426  }
12427 
12428  // Register it in m_pDedicatedAllocations.
12429  {
12430  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12431  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12432  VMA_ASSERT(pDedicatedAllocations);
12433  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
12434  }
12435 
12436  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
12437 
12438  return VK_SUCCESS;
12439 }
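
/*
The `map` parameter above is what backs VMA_ALLOCATION_CREATE_MAPPED_BIT: the
memory stays persistently mapped, so no vmaMapMemory call is needed. A sketch,
assuming an existing allocator and host-side srcData/srcSize:

\code
VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufInfo.size = 65536;
bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
allocInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buffer;
VmaAllocation allocation;
VmaAllocationInfo info;
vmaCreateBuffer(allocator, &bufInfo, &allocInfo, &buffer, &allocation, &info);
memcpy(info.pMappedData, srcData, srcSize); // Memory is already mapped.
\endcode
*/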
12440 
12441 void VmaAllocator_T::GetBufferMemoryRequirements(
12442  VkBuffer hBuffer,
12443  VkMemoryRequirements& memReq,
12444  bool& requiresDedicatedAllocation,
12445  bool& prefersDedicatedAllocation) const
12446 {
12447 #if VMA_DEDICATED_ALLOCATION
12448  if(m_UseKhrDedicatedAllocation)
12449  {
12450  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
12451  memReqInfo.buffer = hBuffer;
12452 
12453  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12454 
12455  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12456  memReq2.pNext = &memDedicatedReq;
12457 
12458  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12459 
12460  memReq = memReq2.memoryRequirements;
12461  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12462  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12463  }
12464  else
12465 #endif // #if VMA_DEDICATED_ALLOCATION
12466  {
12467  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
12468  requiresDedicatedAllocation = false;
12469  prefersDedicatedAllocation = false;
12470  }
12471 }
12472 
12473 void VmaAllocator_T::GetImageMemoryRequirements(
12474  VkImage hImage,
12475  VkMemoryRequirements& memReq,
12476  bool& requiresDedicatedAllocation,
12477  bool& prefersDedicatedAllocation) const
12478 {
12479 #if VMA_DEDICATED_ALLOCATION
12480  if(m_UseKhrDedicatedAllocation)
12481  {
12482  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
12483  memReqInfo.image = hImage;
12484 
12485  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12486 
12487  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12488  memReq2.pNext = &memDedicatedReq;
12489 
12490  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12491 
12492  memReq = memReq2.memoryRequirements;
12493  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12494  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12495  }
12496  else
12497 #endif // #if VMA_DEDICATED_ALLOCATION
12498  {
12499  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
12500  requiresDedicatedAllocation = false;
12501  prefersDedicatedAllocation = false;
12502  }
12503 }
12504 
12505 VkResult VmaAllocator_T::AllocateMemory(
12506  const VkMemoryRequirements& vkMemReq,
12507  bool requiresDedicatedAllocation,
12508  bool prefersDedicatedAllocation,
12509  VkBuffer dedicatedBuffer,
12510  VkImage dedicatedImage,
12511  const VmaAllocationCreateInfo& createInfo,
12512  VmaSuballocationType suballocType,
12513  VmaAllocation* pAllocation)
12514 {
12515  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12516 
12517  if(vkMemReq.size == 0)
12518  {
12519  return VK_ERROR_VALIDATION_FAILED_EXT;
12520  }
12521  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12522  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12523  {
12524  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12525  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12526  }
12527  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12528  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
12529  {
12530  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12531  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12532  }
12533  if(requiresDedicatedAllocation)
12534  {
12535  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12536  {
12537  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12538  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12539  }
12540  if(createInfo.pool != VK_NULL_HANDLE)
12541  {
12542  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12543  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12544  }
12545  }
12546  if((createInfo.pool != VK_NULL_HANDLE) &&
12547  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12548  {
12549  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12550  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12551  }
12552 
12553  if(createInfo.pool != VK_NULL_HANDLE)
12554  {
12555  const VkDeviceSize alignmentForPool = VMA_MAX(
12556  vkMemReq.alignment,
12557  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12558  return createInfo.pool->m_BlockVector.Allocate(
12559  createInfo.pool,
12560  m_CurrentFrameIndex.load(),
12561  vkMemReq.size,
12562  alignmentForPool,
12563  createInfo,
12564  suballocType,
12565  pAllocation);
12566  }
12567  else
12568  {
12569  // Bit mask of Vulkan memory types acceptable for this allocation.
12570  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12571  uint32_t memTypeIndex = UINT32_MAX;
12572  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12573  if(res == VK_SUCCESS)
12574  {
12575  VkDeviceSize alignmentForMemType = VMA_MAX(
12576  vkMemReq.alignment,
12577  GetMemoryTypeMinAlignment(memTypeIndex));
12578 
12579  res = AllocateMemoryOfType(
12580  vkMemReq.size,
12581  alignmentForMemType,
12582  requiresDedicatedAllocation || prefersDedicatedAllocation,
12583  dedicatedBuffer,
12584  dedicatedImage,
12585  createInfo,
12586  memTypeIndex,
12587  suballocType,
12588  pAllocation);
12589  // Succeeded on first try.
12590  if(res == VK_SUCCESS)
12591  {
12592  return res;
12593  }
12594  // Allocation from this memory type failed. Try other compatible memory types.
12595  else
12596  {
12597  for(;;)
12598  {
12599  // Remove old memTypeIndex from list of possibilities.
12600  memoryTypeBits &= ~(1u << memTypeIndex);
12601  // Find alternative memTypeIndex.
12602  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12603  if(res == VK_SUCCESS)
12604  {
12605  alignmentForMemType = VMA_MAX(
12606  vkMemReq.alignment,
12607  GetMemoryTypeMinAlignment(memTypeIndex));
12608 
12609  res = AllocateMemoryOfType(
12610  vkMemReq.size,
12611  alignmentForMemType,
12612  requiresDedicatedAllocation || prefersDedicatedAllocation,
12613  dedicatedBuffer,
12614  dedicatedImage,
12615  createInfo,
12616  memTypeIndex,
12617  suballocType,
12618  pAllocation);
12619  // Allocation from this alternative memory type succeeded.
12620  if(res == VK_SUCCESS)
12621  {
12622  return res;
12623  }
12624  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12625  }
12626  // No other matching memory type index could be found.
12627  else
12628  {
12629  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12630  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12631  }
12632  }
12633  }
12634  }
12635  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12636  else
12637  return res;
12638  }
12639 }
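
/*
The fallback loop above is why a vmaFindMemoryTypeIndex result is only a first
choice: if vkAllocateMemory fails for that type, the next compatible type is
tried. A sketch of querying that first choice up front, assuming an existing
allocator:

\code
VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndex(allocator,
    UINT32_MAX, // memoryTypeBits - no restriction for this illustrative query.
    &allocInfo,
    &memTypeIndex);
// res == VK_ERROR_FEATURE_NOT_PRESENT when no memory type matches at all.
\endcode
*/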
12640 
12641 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
12642 {
12643  VMA_ASSERT(allocation);
12644 
12645  if(TouchAllocation(allocation))
12646  {
12647  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12648  {
12649  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12650  }
12651 
12652  switch(allocation->GetType())
12653  {
12654  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12655  {
12656  VmaBlockVector* pBlockVector = VMA_NULL;
12657  VmaPool hPool = allocation->GetPool();
12658  if(hPool != VK_NULL_HANDLE)
12659  {
12660  pBlockVector = &hPool->m_BlockVector;
12661  }
12662  else
12663  {
12664  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12665  pBlockVector = m_pBlockVectors[memTypeIndex];
12666  }
12667  pBlockVector->Free(allocation);
12668  }
12669  break;
12670  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12671  FreeDedicatedMemory(allocation);
12672  break;
12673  default:
12674  VMA_ASSERT(0);
12675  }
12676  }
12677 
12678  allocation->SetUserData(this, VMA_NULL);
12679  vma_delete(this, allocation);
12680 }
12681 
12682 VkResult VmaAllocator_T::ResizeAllocation(
12683  const VmaAllocation alloc,
12684  VkDeviceSize newSize)
12685 {
12686  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
12687  {
12688  return VK_ERROR_VALIDATION_FAILED_EXT;
12689  }
12690  if(newSize == alloc->GetSize())
12691  {
12692  return VK_SUCCESS;
12693  }
12694 
12695  switch(alloc->GetType())
12696  {
12697  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12698  return VK_ERROR_FEATURE_NOT_PRESENT;
12699  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12700  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
12701  {
12702  alloc->ChangeSize(newSize);
12703  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
12704  return VK_SUCCESS;
12705  }
12706  else
12707  {
12708  return VK_ERROR_OUT_OF_POOL_MEMORY;
12709  }
12710  default:
12711  VMA_ASSERT(0);
12712  return VK_ERROR_VALIDATION_FAILED_EXT;
12713  }
12714 }
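
/*
A sketch of the public entry point for the logic above, assuming an existing
block-based allocation (dedicated allocations always return
VK_ERROR_FEATURE_NOT_PRESENT); the new size is illustrative:

\code
VkResult res = vmaResizeAllocation(allocator, allocation, 128 * 1024);
if(res == VK_ERROR_OUT_OF_POOL_MEMORY)
{
    // No room to grow in place - allocate a new buffer and copy instead.
}
\endcode
*/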
12715 
12716 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12717 {
12718  // Initialize.
12719  InitStatInfo(pStats->total);
12720  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12721  InitStatInfo(pStats->memoryType[i]);
12722  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12723  InitStatInfo(pStats->memoryHeap[i]);
12724 
12725  // Process default pools.
12726  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12727  {
12728  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12729  VMA_ASSERT(pBlockVector);
12730  pBlockVector->AddStats(pStats);
12731  }
12732 
12733  // Process custom pools.
12734  {
12735  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12736  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12737  {
12738  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12739  }
12740  }
12741 
12742  // Process dedicated allocations.
12743  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12744  {
12745  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12746  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12747  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12748  VMA_ASSERT(pDedicatedAllocVector);
12749  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12750  {
12751  VmaStatInfo allocationStatInfo;
12752  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12753  VmaAddStatInfo(pStats->total, allocationStatInfo);
12754  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12755  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12756  }
12757  }
12758 
12759  // Postprocess.
12760  VmaPostprocessCalcStatInfo(pStats->total);
12761  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12762  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12763  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12764  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12765 }
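
/*
A sketch of consuming these statistics, assuming an existing allocator:

\code
VmaStats stats;
vmaCalculateStats(allocator, &stats);
printf("Used: %llu B in %u allocations, unused: %llu B\n",
    (unsigned long long)stats.total.usedBytes,
    stats.total.allocationCount,
    (unsigned long long)stats.total.unusedBytes);
\endcode
*/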
12766 
12767 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12768 
12769 VkResult VmaAllocator_T::Defragment(
12770  VmaAllocation* pAllocations,
12771  size_t allocationCount,
12772  VkBool32* pAllocationsChanged,
12773  const VmaDefragmentationInfo* pDefragmentationInfo,
12774  VmaDefragmentationStats* pDefragmentationStats)
12775 {
12776  if(pAllocationsChanged != VMA_NULL)
12777  {
12778  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
12779  }
12780  if(pDefragmentationStats != VMA_NULL)
12781  {
12782  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12783  }
12784 
12785  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12786 
12787  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12788 
12789  const size_t poolCount = m_Pools.size();
12790 
12791  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12792  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12793  {
12794  VmaAllocation hAlloc = pAllocations[allocIndex];
12795  VMA_ASSERT(hAlloc);
12796  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12797  // DedicatedAlloc cannot be defragmented.
12798  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12799  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12800  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12801  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12802  // Lost allocation cannot be defragmented.
12803  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12804  {
12805  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12806 
12807  const VmaPool hAllocPool = hAlloc->GetPool();
12808  // This allocation belongs to a custom pool.
12809  if(hAllocPool != VK_NULL_HANDLE)
12810  {
12811  // Pools with linear or buddy algorithm are not defragmented.
12812  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12813  {
12814  pAllocBlockVector = &hAllocPool->m_BlockVector;
12815  }
12816  }
12817  // This allocation belongs to the general pool.
12818  else
12819  {
12820  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12821  }
12822 
12823  if(pAllocBlockVector != VMA_NULL)
12824  {
12825  VmaDefragmentator* const pDefragmentator =
12826  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12827  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12828  &pAllocationsChanged[allocIndex] : VMA_NULL;
12829  pDefragmentator->AddAllocation(hAlloc, pChanged);
12830  }
12831  }
12832  }
12833 
12834  VkResult result = VK_SUCCESS;
12835 
12836  // ======== Main processing.
12837 
12838  VkDeviceSize maxBytesToMove = SIZE_MAX;
12839  uint32_t maxAllocationsToMove = UINT32_MAX;
12840  if(pDefragmentationInfo != VMA_NULL)
12841  {
12842  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12843  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12844  }
12845 
12846  // Process standard memory.
12847  for(uint32_t memTypeIndex = 0;
12848  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12849  ++memTypeIndex)
12850  {
12851  // Only HOST_VISIBLE memory types can be defragmented.
12852  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12853  {
12854  result = m_pBlockVectors[memTypeIndex]->Defragment(
12855  pDefragmentationStats,
12856  maxBytesToMove,
12857  maxAllocationsToMove);
12858  }
12859  }
12860 
12861  // Process custom pools.
12862  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12863  {
12864  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12865  pDefragmentationStats,
12866  maxBytesToMove,
12867  maxAllocationsToMove);
12868  }
12869 
12870  // ======== Destroy defragmentators.
12871 
12872  // Process custom pools.
12873  for(size_t poolIndex = poolCount; poolIndex--; )
12874  {
12875  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12876  }
12877 
12878  // Process standard memory.
12879  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12880  {
12881  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12882  {
12883  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12884  }
12885  }
12886 
12887  return result;
12888 }
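
/*
A sketch of driving this from user code, assuming the allocations live in
HOST_VISIBLE + HOST_COHERENT memory and are not in use by the GPU;
`allocations` is an assumed std::vector<VmaAllocation>:

\code
VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // No byte limit.
defragInfo.maxAllocationsToMove = UINT32_MAX; // No count limit.

VmaDefragmentationStats stats = {};
std::vector<VkBool32> changed(allocations.size());
vmaDefragment(allocator, allocations.data(), allocations.size(),
    changed.data(), &defragInfo, &stats);
// Any buffer/image whose changed[i] == VK_TRUE now points at moved memory and
// must be destroyed, recreated and rebound.
\endcode
*/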
12889 
12890 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
12891 {
12892  if(hAllocation->CanBecomeLost())
12893  {
12894  /*
12895  Warning: This is a carefully designed algorithm.
12896  Do not modify unless you really know what you're doing :)
12897  */
12898  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12899  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12900  for(;;)
12901  {
12902  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12903  {
12904  pAllocationInfo->memoryType = UINT32_MAX;
12905  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
12906  pAllocationInfo->offset = 0;
12907  pAllocationInfo->size = hAllocation->GetSize();
12908  pAllocationInfo->pMappedData = VMA_NULL;
12909  pAllocationInfo->pUserData = hAllocation->GetUserData();
12910  return;
12911  }
12912  else if(localLastUseFrameIndex == localCurrFrameIndex)
12913  {
12914  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12915  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12916  pAllocationInfo->offset = hAllocation->GetOffset();
12917  pAllocationInfo->size = hAllocation->GetSize();
12918  pAllocationInfo->pMappedData = VMA_NULL;
12919  pAllocationInfo->pUserData = hAllocation->GetUserData();
12920  return;
12921  }
12922  else // Last use time earlier than current time.
12923  {
12924  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12925  {
12926  localLastUseFrameIndex = localCurrFrameIndex;
12927  }
12928  }
12929  }
12930  }
12931  else
12932  {
12933 #if VMA_STATS_STRING_ENABLED
12934  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12935  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12936  for(;;)
12937  {
12938  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12939  if(localLastUseFrameIndex == localCurrFrameIndex)
12940  {
12941  break;
12942  }
12943  else // Last use time earlier than current time.
12944  {
12945  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12946  {
12947  localLastUseFrameIndex = localCurrFrameIndex;
12948  }
12949  }
12950  }
12951 #endif
12952 
12953  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12954  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12955  pAllocationInfo->offset = hAllocation->GetOffset();
12956  pAllocationInfo->size = hAllocation->GetSize();
12957  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
12958  pAllocationInfo->pUserData = hAllocation->GetUserData();
12959  }
12960 }
12961 
12962 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
12963 {
12964  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
12965  if(hAllocation->CanBecomeLost())
12966  {
12967  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12968  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12969  for(;;)
12970  {
12971  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12972  {
12973  return false;
12974  }
12975  else if(localLastUseFrameIndex == localCurrFrameIndex)
12976  {
12977  return true;
12978  }
12979  else // Last use time earlier than current time.
12980  {
12981  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12982  {
12983  localLastUseFrameIndex = localCurrFrameIndex;
12984  }
12985  }
12986  }
12987  }
12988  else
12989  {
12990 #if VMA_STATS_STRING_ENABLED
12991  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12992  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12993  for(;;)
12994  {
12995  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12996  if(localLastUseFrameIndex == localCurrFrameIndex)
12997  {
12998  break;
12999  }
13000  else // Last use time earlier than current time.
13001  {
13002  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
13003  {
13004  localLastUseFrameIndex = localCurrFrameIndex;
13005  }
13006  }
13007  }
13008 #endif
13009 
13010  return true;
13011  }
13012 }
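
/*
A per-frame sketch of the lost-allocation protocol served by the two functions
above, assuming the allocation was created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and frameIndex is the application's
frame counter:

\code
vmaSetCurrentFrameIndex(allocator, frameIndex);

if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
{
    // The allocation became lost - recreate the resource before using it.
}
\endcode
*/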
13013 
13014 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
13015 {
13016  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
13017 
13018  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
13019 
13020  if(newCreateInfo.maxBlockCount == 0)
13021  {
13022  newCreateInfo.maxBlockCount = SIZE_MAX;
13023  }
13024  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
13025  {
13026  return VK_ERROR_INITIALIZATION_FAILED;
13027  }
13028 
13029  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
13030 
13031  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
13032 
13033  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
13034  if(res != VK_SUCCESS)
13035  {
13036  vma_delete(this, *pPool);
13037  *pPool = VMA_NULL;
13038  return res;
13039  }
13040 
13041  // Add to m_Pools.
13042  {
13043  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13044  (*pPool)->SetId(m_NextPoolId++);
13045  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
13046  }
13047 
13048  return VK_SUCCESS;
13049 }
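
/*
A sketch of creating such a custom pool, assuming memTypeIndex came from
vmaFindMemoryTypeIndex; the block size is illustrative:

\code
VmaPoolCreateInfo poolInfo = {};
poolInfo.memoryTypeIndex = memTypeIndex;
poolInfo.blockSize = 16ull * 1024 * 1024; // 0 would mean the preferred block size computed above.
poolInfo.minBlockCount = 1;
poolInfo.maxBlockCount = 0; // 0 becomes SIZE_MAX - unlimited, as handled above.

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
\endcode
*/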
13050 
13051 void VmaAllocator_T::DestroyPool(VmaPool pool)
13052 {
13053  // Remove from m_Pools.
13054  {
13055  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13056  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
13057  VMA_ASSERT(success && "Pool not found in Allocator.");
13058  }
13059 
13060  vma_delete(this, pool);
13061 }
13062 
13063 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
13064 {
13065  pool->m_BlockVector.GetPoolStats(pPoolStats);
13066 }
13067 
13068 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
13069 {
13070  m_CurrentFrameIndex.store(frameIndex);
13071 }
13072 
13073 void VmaAllocator_T::MakePoolAllocationsLost(
13074  VmaPool hPool,
13075  size_t* pLostAllocationCount)
13076 {
13077  hPool->m_BlockVector.MakePoolAllocationsLost(
13078  m_CurrentFrameIndex.load(),
13079  pLostAllocationCount);
13080 }
13081 
13082 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
13083 {
13084  return hPool->m_BlockVector.CheckCorruption();
13085 }
13086 
13087 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
13088 {
13089  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
13090 
13091  // Process default pools.
13092  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13093  {
13094  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
13095  {
13096  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
13097  VMA_ASSERT(pBlockVector);
13098  VkResult localRes = pBlockVector->CheckCorruption();
13099  switch(localRes)
13100  {
13101  case VK_ERROR_FEATURE_NOT_PRESENT:
13102  break;
13103  case VK_SUCCESS:
13104  finalRes = VK_SUCCESS;
13105  break;
13106  default:
13107  return localRes;
13108  }
13109  }
13110  }
13111 
13112  // Process custom pools.
13113  {
13114  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13115  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
13116  {
13117  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
13118  {
13119  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
13120  switch(localRes)
13121  {
13122  case VK_ERROR_FEATURE_NOT_PRESENT:
13123  break;
13124  case VK_SUCCESS:
13125  finalRes = VK_SUCCESS;
13126  break;
13127  default:
13128  return localRes;
13129  }
13130  }
13131  }
13132  }
13133 
13134  return finalRes;
13135 }
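
/*
Corruption checking only does real work when margins are compiled in. A sketch,
assuming VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION were defined to
nonzero values before including this header:

\code
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // Check all memory types.
// VK_SUCCESS - something was checked, no corruption found.
// VK_ERROR_FEATURE_NOT_PRESENT - nothing applicable to check.
// VK_ERROR_VALIDATION_FAILED_EXT - corruption detected.
\endcode
*/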
13136 
13137 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
13138 {
13139  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
13140  (*pAllocation)->InitLost();
13141 }
13142 
13143 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
13144 {
13145  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
13146 
13147  VkResult res;
13148  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13149  {
13150  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13151  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
13152  {
13153  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13154  if(res == VK_SUCCESS)
13155  {
13156  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
13157  }
13158  }
13159  else
13160  {
13161  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
13162  }
13163  }
13164  else
13165  {
13166  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13167  }
13168 
13169  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
13170  {
13171  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
13172  }
13173 
13174  return res;
13175 }
13176 
13177 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
13178 {
13179  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
13180  {
13181  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
13182  }
13183 
13184  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
13185 
13186  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
13187  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13188  {
13189  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13190  m_HeapSizeLimit[heapIndex] += size;
13191  }
13192 }
13193 
13194 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
13195 {
13196  if(hAllocation->CanBecomeLost())
13197  {
13198  return VK_ERROR_MEMORY_MAP_FAILED;
13199  }
13200 
13201  switch(hAllocation->GetType())
13202  {
13203  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13204  {
13205  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13206  char *pBytes = VMA_NULL;
13207  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
13208  if(res == VK_SUCCESS)
13209  {
13210  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
13211  hAllocation->BlockAllocMap();
13212  }
13213  return res;
13214  }
13215  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13216  return hAllocation->DedicatedAllocMap(this, ppData);
13217  default:
13218  VMA_ASSERT(0);
13219  return VK_ERROR_MEMORY_MAP_FAILED;
13220  }
13221 }
13222 
13223 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
13224 {
13225  switch(hAllocation->GetType())
13226  {
13227  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13228  {
13229  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13230  hAllocation->BlockAllocUnmap();
13231  pBlock->Unmap(this, 1);
13232  }
13233  break;
13234  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13235  hAllocation->DedicatedAllocUnmap(this);
13236  break;
13237  default:
13238  VMA_ASSERT(0);
13239  }
13240 }
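
/*
A sketch of the public mapping API implemented above; because mapping is
reference-counted per VkDeviceMemory block, nested Map/Unmap pairs on
allocations from the same block are safe. srcData/srcSize are assumed to exist:

\code
void* pData;
VkResult res = vmaMapMemory(allocator, allocation, &pData);
if(res == VK_SUCCESS)
{
    memcpy(pData, srcData, srcSize);
    vmaUnmapMemory(allocator, allocation);
}
\endcode
*/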
13241 
13242 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
13243 {
13244  VkResult res = VK_SUCCESS;
13245  switch(hAllocation->GetType())
13246  {
13247  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13248  res = GetVulkanFunctions().vkBindBufferMemory(
13249  m_hDevice,
13250  hBuffer,
13251  hAllocation->GetMemory(),
13252  0); //memoryOffset
13253  break;
13254  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13255  {
13256  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13257  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
13258  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
13259  break;
13260  }
13261  default:
13262  VMA_ASSERT(0);
13263  }
13264  return res;
13265 }
13266 
13267 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
13268 {
13269  VkResult res = VK_SUCCESS;
13270  switch(hAllocation->GetType())
13271  {
13272  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13273  res = GetVulkanFunctions().vkBindImageMemory(
13274  m_hDevice,
13275  hImage,
13276  hAllocation->GetMemory(),
13277  0); //memoryOffset
13278  break;
13279  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13280  {
13281  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13282  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13283  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13284  break;
13285  }
13286  default:
13287  VMA_ASSERT(0);
13288  }
13289  return res;
13290 }
13291 
13292 void VmaAllocator_T::FlushOrInvalidateAllocation(
13293  VmaAllocation hAllocation,
13294  VkDeviceSize offset, VkDeviceSize size,
13295  VMA_CACHE_OPERATION op)
13296 {
13297  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
13298  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
13299  {
13300  const VkDeviceSize allocationSize = hAllocation->GetSize();
13301  VMA_ASSERT(offset <= allocationSize);
13302 
13303  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13304 
13305  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13306  memRange.memory = hAllocation->GetMemory();
13307 
13308  switch(hAllocation->GetType())
13309  {
13310  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13311  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13312  if(size == VK_WHOLE_SIZE)
13313  {
13314  memRange.size = allocationSize - memRange.offset;
13315  }
13316  else
13317  {
13318  VMA_ASSERT(offset + size <= allocationSize);
13319  memRange.size = VMA_MIN(
13320  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
13321  allocationSize - memRange.offset);
13322  }
13323  break;
13324 
13325  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13326  {
13327  // 1. Compute the range relative to this allocation.
13328  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13329  if(size == VK_WHOLE_SIZE)
13330  {
13331  size = allocationSize - offset;
13332  }
13333  else
13334  {
13335  VMA_ASSERT(offset + size <= allocationSize);
13336  }
13337  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
13338 
13339  // 2. Adjust the range to whole-block coordinates and clamp to block size.
13340  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
13341  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
13342  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
13343  memRange.offset += allocationOffset;
13344  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
13345 
13346  break;
13347  }
13348 
13349  default:
13350  VMA_ASSERT(0);
13351  }
13352 
13353  switch(op)
13354  {
13355  case VMA_CACHE_FLUSH:
13356  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
13357  break;
13358  case VMA_CACHE_INVALIDATE:
13359  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
13360  break;
13361  default:
13362  VMA_ASSERT(0);
13363  }
13364  }
13365  // else: Just ignore this call.
13366 }
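
// Worked example of the range math above (values assumed for illustration):
// with nonCoherentAtomSize = 64, offset = 100, size = 200:
//   memRange.offset = VmaAlignDown(100, 64) = 64
//   memRange.size   = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256
// The resulting range [64, 320) covers the requested bytes [100, 300) while
// respecting VkPhysicalDeviceLimits::nonCoherentAtomSize.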
13367 
13368 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13369 {
13370  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13371 
13372  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13373  {
13374  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13375  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13376  VMA_ASSERT(pDedicatedAllocations);
13377  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13378  VMA_ASSERT(success);
13379  }
13380 
13381  VkDeviceMemory hMemory = allocation->GetMemory();
13382 
13383  /*
13384  There is no need to call this, because the Vulkan spec allows skipping
13385  vkUnmapMemory before vkFreeMemory.
13386 
13387  if(allocation->GetMappedData() != VMA_NULL)
13388  {
13389  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13390  }
13391  */
13392 
13393  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13394 
13395  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13396 }
13397 
13398 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13399 {
13400  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13401  !hAllocation->CanBecomeLost() &&
13402  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13403  {
13404  void* pData = VMA_NULL;
13405  VkResult res = Map(hAllocation, &pData);
13406  if(res == VK_SUCCESS)
13407  {
13408  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13409  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13410  Unmap(hAllocation);
13411  }
13412  else
13413  {
13414  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13415  }
13416  }
13417 }
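
// Illustrative configuration, not part of this listing: FillAllocation() only
// does work when the implementation is compiled with allocation initialization
// enabled, e.g.:
//
//   #define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"
//
// Host-visible allocations are then filled with a bit pattern when created and
// freed, which helps catch reads of uninitialized or already-freed memory.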
13418 
13419 #if VMA_STATS_STRING_ENABLED
13420 
13421 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
13422 {
13423  bool dedicatedAllocationsStarted = false;
13424  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13425  {
13426  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13427  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
13428  VMA_ASSERT(pDedicatedAllocVector);
13429  if(pDedicatedAllocVector->empty() == false)
13430  {
13431  if(dedicatedAllocationsStarted == false)
13432  {
13433  dedicatedAllocationsStarted = true;
13434  json.WriteString("DedicatedAllocations");
13435  json.BeginObject();
13436  }
13437 
13438  json.BeginString("Type ");
13439  json.ContinueString(memTypeIndex);
13440  json.EndString();
13441 
13442  json.BeginArray();
13443 
13444  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
13445  {
13446  json.BeginObject(true);
13447  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
13448  hAlloc->PrintParameters(json);
13449  json.EndObject();
13450  }
13451 
13452  json.EndArray();
13453  }
13454  }
13455  if(dedicatedAllocationsStarted)
13456  {
13457  json.EndObject();
13458  }
13459 
13460  {
13461  bool allocationsStarted = false;
13462  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13463  {
13464  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
13465  {
13466  if(allocationsStarted == false)
13467  {
13468  allocationsStarted = true;
13469  json.WriteString("DefaultPools");
13470  json.BeginObject();
13471  }
13472 
13473  json.BeginString("Type ");
13474  json.ContinueString(memTypeIndex);
13475  json.EndString();
13476 
13477  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
13478  }
13479  }
13480  if(allocationsStarted)
13481  {
13482  json.EndObject();
13483  }
13484  }
13485 
13486  // Custom pools
13487  {
13488  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13489  const size_t poolCount = m_Pools.size();
13490  if(poolCount > 0)
13491  {
13492  json.WriteString("Pools");
13493  json.BeginObject();
13494  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13495  {
13496  json.BeginString();
13497  json.ContinueString(m_Pools[poolIndex]->GetId());
13498  json.EndString();
13499 
13500  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
13501  }
13502  json.EndObject();
13503  }
13504  }
13505 }
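
// Abridged sketch of the JSON shape emitted above (the exact allocation and
// block fields come from VmaAllocation_T::PrintParameters and
// VmaBlockVector::PrintDetailedMap):
//
//   "DedicatedAllocations": { "Type 0": [ { ... }, ... ], ... },
//   "DefaultPools":         { "Type 0": { ... }, ... },
//   "Pools":                { "<pool id>": { ... }, ... }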
13506 
13507 #endif // #if VMA_STATS_STRING_ENABLED
13508 
13509 ////////////////////////////////////////////////////////////////////////////////
13510 // Public interface
13511 
13512 VkResult vmaCreateAllocator(
13513  const VmaAllocatorCreateInfo* pCreateInfo,
13514  VmaAllocator* pAllocator)
13515 {
13516  VMA_ASSERT(pCreateInfo && pAllocator);
13517  VMA_DEBUG_LOG("vmaCreateAllocator");
13518  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13519  return (*pAllocator)->Init(pCreateInfo);
13520 }
13521 
13522 void vmaDestroyAllocator(
13523  VmaAllocator allocator)
13524 {
13525  if(allocator != VK_NULL_HANDLE)
13526  {
13527  VMA_DEBUG_LOG("vmaDestroyAllocator");
13528  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13529  vma_delete(&allocationCallbacks, allocator);
13530  }
13531 }
13532 
13533 void vmaGetPhysicalDeviceProperties(
13534  VmaAllocator allocator,
13535  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13536 {
13537  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13538  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13539 }
13540 
13541 void vmaGetMemoryProperties(
13542  VmaAllocator allocator,
13543  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13544 {
13545  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13546  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13547 }
13548 
13549 void vmaGetMemoryTypeProperties(
13550  VmaAllocator allocator,
13551  uint32_t memoryTypeIndex,
13552  VkMemoryPropertyFlags* pFlags)
13553 {
13554  VMA_ASSERT(allocator && pFlags);
13555  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13556  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13557 }
13558 
13559 void vmaSetCurrentFrameIndex(
13560  VmaAllocator allocator,
13561  uint32_t frameIndex)
13562 {
13563  VMA_ASSERT(allocator);
13564  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13565 
13566  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13567 
13568  allocator->SetCurrentFrameIndex(frameIndex);
13569 }
13570 
13571 void vmaCalculateStats(
13572  VmaAllocator allocator,
13573  VmaStats* pStats)
13574 {
13575  VMA_ASSERT(allocator && pStats);
13576  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13577  allocator->CalculateStats(pStats);
13578 }
13579 
13580 #if VMA_STATS_STRING_ENABLED
13581 
13582 void vmaBuildStatsString(
13583  VmaAllocator allocator,
13584  char** ppStatsString,
13585  VkBool32 detailedMap)
13586 {
13587  VMA_ASSERT(allocator && ppStatsString);
13588  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13589 
13590  VmaStringBuilder sb(allocator);
13591  {
13592  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13593  json.BeginObject();
13594 
13595  VmaStats stats;
13596  allocator->CalculateStats(&stats);
13597 
13598  json.WriteString("Total");
13599  VmaPrintStatInfo(json, stats.total);
13600 
13601  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13602  {
13603  json.BeginString("Heap ");
13604  json.ContinueString(heapIndex);
13605  json.EndString();
13606  json.BeginObject();
13607 
13608  json.WriteString("Size");
13609  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13610 
13611  json.WriteString("Flags");
13612  json.BeginArray(true);
13613  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13614  {
13615  json.WriteString("DEVICE_LOCAL");
13616  }
13617  json.EndArray();
13618 
13619  if(stats.memoryHeap[heapIndex].blockCount > 0)
13620  {
13621  json.WriteString("Stats");
13622  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13623  }
13624 
13625  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13626  {
13627  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13628  {
13629  json.BeginString("Type ");
13630  json.ContinueString(typeIndex);
13631  json.EndString();
13632 
13633  json.BeginObject();
13634 
13635  json.WriteString("Flags");
13636  json.BeginArray(true);
13637  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13638  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13639  {
13640  json.WriteString("DEVICE_LOCAL");
13641  }
13642  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13643  {
13644  json.WriteString("HOST_VISIBLE");
13645  }
13646  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13647  {
13648  json.WriteString("HOST_COHERENT");
13649  }
13650  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13651  {
13652  json.WriteString("HOST_CACHED");
13653  }
13654  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13655  {
13656  json.WriteString("LAZILY_ALLOCATED");
13657  }
13658  json.EndArray();
13659 
13660  if(stats.memoryType[typeIndex].blockCount > 0)
13661  {
13662  json.WriteString("Stats");
13663  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13664  }
13665 
13666  json.EndObject();
13667  }
13668  }
13669 
13670  json.EndObject();
13671  }
13672  if(detailedMap == VK_TRUE)
13673  {
13674  allocator->PrintDetailedMap(json);
13675  }
13676 
13677  json.EndObject();
13678  }
13679 
13680  const size_t len = sb.GetLength();
13681  char* const pChars = vma_new_array(allocator, char, len + 1);
13682  if(len > 0)
13683  {
13684  memcpy(pChars, sb.GetData(), len);
13685  }
13686  pChars[len] = '\0';
13687  *ppStatsString = pChars;
13688 }
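
// --- Illustrative sketch, not part of vk_mem_alloc.h: dumping the JSON built
// above. Assumes <cstdio> is available and `allocator` is initialized.
static void ExampleDumpStats(VmaAllocator allocator)
{
    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString); // must be freed through the same allocator
}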
13689 
13690 void vmaFreeStatsString(
13691  VmaAllocator allocator,
13692  char* pStatsString)
13693 {
13694  if(pStatsString != VMA_NULL)
13695  {
13696  VMA_ASSERT(allocator);
13697  size_t len = strlen(pStatsString);
13698  vma_delete_array(allocator, pStatsString, len + 1);
13699  }
13700 }
13701 
13702 #endif // #if VMA_STATS_STRING_ENABLED
13703 
13704 /*
13705 This function is not protected by any mutex because it just reads immutable data.
13706 */
13707 VkResult vmaFindMemoryTypeIndex(
13708  VmaAllocator allocator,
13709  uint32_t memoryTypeBits,
13710  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13711  uint32_t* pMemoryTypeIndex)
13712 {
13713  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13714  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13715  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13716 
13717  if(pAllocationCreateInfo->memoryTypeBits != 0)
13718  {
13719  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13720  }
13721 
13722  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13723  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13724 
13725  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13726  if(mapped)
13727  {
13728  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13729  }
13730 
13731  // Convert usage to requiredFlags and preferredFlags.
13732  switch(pAllocationCreateInfo->usage)
13733  {
13734  case VMA_MEMORY_USAGE_UNKNOWN:
13735  break;
13736  case VMA_MEMORY_USAGE_GPU_ONLY:
13737  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13738  {
13739  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13740  }
13741  break;
13742  case VMA_MEMORY_USAGE_CPU_ONLY:
13743  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13744  break;
13745  case VMA_MEMORY_USAGE_CPU_TO_GPU:
13746  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13747  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13748  {
13749  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13750  }
13751  break;
13752  case VMA_MEMORY_USAGE_GPU_TO_CPU:
13753  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13754  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13755  break;
13756  default:
13757  break;
13758  }
13759 
13760  *pMemoryTypeIndex = UINT32_MAX;
13761  uint32_t minCost = UINT32_MAX;
13762  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13763  memTypeIndex < allocator->GetMemoryTypeCount();
13764  ++memTypeIndex, memTypeBit <<= 1)
13765  {
13766  // This memory type is acceptable according to memoryTypeBits bitmask.
13767  if((memTypeBit & memoryTypeBits) != 0)
13768  {
13769  const VkMemoryPropertyFlags currFlags =
13770  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13771  // This memory type contains requiredFlags.
13772  if((requiredFlags & ~currFlags) == 0)
13773  {
13774  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13775  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13776  // Remember memory type with lowest cost.
13777  if(currCost < minCost)
13778  {
13779  *pMemoryTypeIndex = memTypeIndex;
13780  if(currCost == 0)
13781  {
13782  return VK_SUCCESS;
13783  }
13784  minCost = currCost;
13785  }
13786  }
13787  }
13788  }
13789  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13790 }
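
// --- Illustrative sketch, not part of vk_mem_alloc.h: picking a memory type for
// a host-visible staging allocation. memoryTypeBits would normally come from
// vkGetBufferMemoryRequirements (or use vmaFindMemoryTypeIndexForBufferInfo below).
static VkResult ExampleFindStagingMemoryType(
    VmaAllocator allocator, uint32_t memoryTypeBits, uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    // CPU_ONLY translates to required HOST_VISIBLE | HOST_COHERENT, per the switch above.
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    return vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, pMemTypeIndex);
}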
13791 
13792 VkResult vmaFindMemoryTypeIndexForBufferInfo(
13793  VmaAllocator allocator,
13794  const VkBufferCreateInfo* pBufferCreateInfo,
13795  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13796  uint32_t* pMemoryTypeIndex)
13797 {
13798  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13799  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13800  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13801  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13802 
13803  const VkDevice hDev = allocator->m_hDevice;
13804  VkBuffer hBuffer = VK_NULL_HANDLE;
13805  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13806  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13807  if(res == VK_SUCCESS)
13808  {
13809  VkMemoryRequirements memReq = {};
13810  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13811  hDev, hBuffer, &memReq);
13812 
13813  res = vmaFindMemoryTypeIndex(
13814  allocator,
13815  memReq.memoryTypeBits,
13816  pAllocationCreateInfo,
13817  pMemoryTypeIndex);
13818 
13819  allocator->GetVulkanFunctions().vkDestroyBuffer(
13820  hDev, hBuffer, allocator->GetAllocationCallbacks());
13821  }
13822  return res;
13823 }
13824 
13825 VkResult vmaFindMemoryTypeIndexForImageInfo(
13826  VmaAllocator allocator,
13827  const VkImageCreateInfo* pImageCreateInfo,
13828  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13829  uint32_t* pMemoryTypeIndex)
13830 {
13831  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13832  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13833  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13834  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13835 
13836  const VkDevice hDev = allocator->m_hDevice;
13837  VkImage hImage = VK_NULL_HANDLE;
13838  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13839  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13840  if(res == VK_SUCCESS)
13841  {
13842  VkMemoryRequirements memReq = {};
13843  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13844  hDev, hImage, &memReq);
13845 
13846  res = vmaFindMemoryTypeIndex(
13847  allocator,
13848  memReq.memoryTypeBits,
13849  pAllocationCreateInfo,
13850  pMemoryTypeIndex);
13851 
13852  allocator->GetVulkanFunctions().vkDestroyImage(
13853  hDev, hImage, allocator->GetAllocationCallbacks());
13854  }
13855  return res;
13856 }
13857 
13858 VkResult vmaCreatePool(
13859  VmaAllocator allocator,
13860  const VmaPoolCreateInfo* pCreateInfo,
13861  VmaPool* pPool)
13862 {
13863  VMA_ASSERT(allocator && pCreateInfo && pPool);
13864 
13865  VMA_DEBUG_LOG("vmaCreatePool");
13866 
13867  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13868 
13869  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13870 
13871 #if VMA_RECORDING_ENABLED
13872  if(allocator->GetRecorder() != VMA_NULL)
13873  {
13874  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13875  }
13876 #endif
13877 
13878  return res;
13879 }
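
// --- Illustrative sketch, not part of vk_mem_alloc.h: creating a custom pool.
// The buffer parameters here are placeholders standing in for resources the
// pool is expected to serve.
static VkResult ExampleCreatePool(VmaAllocator allocator, VmaPool* pPool)
{
    VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    exampleBufCreateInfo.size = 1024;
    exampleBufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    uint32_t memTypeIndex = 0;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
    poolCreateInfo.maxBlockCount = 2;                // cap the pool at 256 MiB total
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}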
13880 
13881 void vmaDestroyPool(
13882  VmaAllocator allocator,
13883  VmaPool pool)
13884 {
13885  VMA_ASSERT(allocator);
13886 
13887  if(pool == VK_NULL_HANDLE)
13888  {
13889  return;
13890  }
13891 
13892  VMA_DEBUG_LOG("vmaDestroyPool");
13893 
13894  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13895 
13896 #if VMA_RECORDING_ENABLED
13897  if(allocator->GetRecorder() != VMA_NULL)
13898  {
13899  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13900  }
13901 #endif
13902 
13903  allocator->DestroyPool(pool);
13904 }
13905 
13906 void vmaGetPoolStats(
13907  VmaAllocator allocator,
13908  VmaPool pool,
13909  VmaPoolStats* pPoolStats)
13910 {
13911  VMA_ASSERT(allocator && pool && pPoolStats);
13912 
13913  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13914 
13915  allocator->GetPoolStats(pool, pPoolStats);
13916 }
13917 
13918 void vmaMakePoolAllocationsLost(
13919  VmaAllocator allocator,
13920  VmaPool pool,
13921  size_t* pLostAllocationCount)
13922 {
13923  VMA_ASSERT(allocator && pool);
13924 
13925  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13926 
13927 #if VMA_RECORDING_ENABLED
13928  if(allocator->GetRecorder() != VMA_NULL)
13929  {
13930  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13931  }
13932 #endif
13933 
13934  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13935 }
13936 
13937 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13938 {
13939  VMA_ASSERT(allocator && pool);
13940 
13941  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13942 
13943  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13944 
13945  return allocator->CheckPoolCorruption(pool);
13946 }
13947 
13948 VkResult vmaAllocateMemory(
13949  VmaAllocator allocator,
13950  const VkMemoryRequirements* pVkMemoryRequirements,
13951  const VmaAllocationCreateInfo* pCreateInfo,
13952  VmaAllocation* pAllocation,
13953  VmaAllocationInfo* pAllocationInfo)
13954 {
13955  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13956 
13957  VMA_DEBUG_LOG("vmaAllocateMemory");
13958 
13959  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13960 
13961  VkResult result = allocator->AllocateMemory(
13962  *pVkMemoryRequirements,
13963  false, // requiresDedicatedAllocation
13964  false, // prefersDedicatedAllocation
13965  VK_NULL_HANDLE, // dedicatedBuffer
13966  VK_NULL_HANDLE, // dedicatedImage
13967  *pCreateInfo,
13968  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13969  pAllocation);
13970 
13971 #if VMA_RECORDING_ENABLED
13972  if(allocator->GetRecorder() != VMA_NULL)
13973  {
13974  allocator->GetRecorder()->RecordAllocateMemory(
13975  allocator->GetCurrentFrameIndex(),
13976  *pVkMemoryRequirements,
13977  *pCreateInfo,
13978  *pAllocation);
13979  }
13980 #endif
13981 
13982  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13983  {
13984  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13985  }
13986 
13987  return result;
13988 }
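
// --- Illustrative sketch, not part of vk_mem_alloc.h: allocating raw memory for
// requirements obtained elsewhere (e.g. from vkGetBufferMemoryRequirements).
static VkResult ExampleAllocateRaw(
    VmaAllocator allocator, const VkMemoryRequirements* pMemReq, VmaAllocation* pAlloc)
{
    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    VmaAllocationInfo allocInfo; // receives deviceMemory/offset/size on success
    return vmaAllocateMemory(allocator, pMemReq, &createInfo, pAlloc, &allocInfo);
}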
13989 
13990 VkResult vmaAllocateMemoryForBuffer(
13991  VmaAllocator allocator,
13992  VkBuffer buffer,
13993  const VmaAllocationCreateInfo* pCreateInfo,
13994  VmaAllocation* pAllocation,
13995  VmaAllocationInfo* pAllocationInfo)
13996 {
13997  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13998 
13999  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
14000 
14001  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14002 
14003  VkMemoryRequirements vkMemReq = {};
14004  bool requiresDedicatedAllocation = false;
14005  bool prefersDedicatedAllocation = false;
14006  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
14007  requiresDedicatedAllocation,
14008  prefersDedicatedAllocation);
14009 
14010  VkResult result = allocator->AllocateMemory(
14011  vkMemReq,
14012  requiresDedicatedAllocation,
14013  prefersDedicatedAllocation,
14014  buffer, // dedicatedBuffer
14015  VK_NULL_HANDLE, // dedicatedImage
14016  *pCreateInfo,
14017  VMA_SUBALLOCATION_TYPE_BUFFER,
14018  pAllocation);
14019 
14020 #if VMA_RECORDING_ENABLED
14021  if(allocator->GetRecorder() != VMA_NULL)
14022  {
14023  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
14024  allocator->GetCurrentFrameIndex(),
14025  vkMemReq,
14026  requiresDedicatedAllocation,
14027  prefersDedicatedAllocation,
14028  *pCreateInfo,
14029  *pAllocation);
14030  }
14031 #endif
14032 
14033  if(pAllocationInfo && result == VK_SUCCESS)
14034  {
14035  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14036  }
14037 
14038  return result;
14039 }
14040 
14041 VkResult vmaAllocateMemoryForImage(
14042  VmaAllocator allocator,
14043  VkImage image,
14044  const VmaAllocationCreateInfo* pCreateInfo,
14045  VmaAllocation* pAllocation,
14046  VmaAllocationInfo* pAllocationInfo)
14047 {
14048  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
14049 
14050  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
14051 
14052  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14053 
14054  VkMemoryRequirements vkMemReq = {};
14055  bool requiresDedicatedAllocation = false;
14056  bool prefersDedicatedAllocation = false;
14057  allocator->GetImageMemoryRequirements(image, vkMemReq,
14058  requiresDedicatedAllocation, prefersDedicatedAllocation);
14059 
14060  VkResult result = allocator->AllocateMemory(
14061  vkMemReq,
14062  requiresDedicatedAllocation,
14063  prefersDedicatedAllocation,
14064  VK_NULL_HANDLE, // dedicatedBuffer
14065  image, // dedicatedImage
14066  *pCreateInfo,
14067  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
14068  pAllocation);
14069 
14070 #if VMA_RECORDING_ENABLED
14071  if(allocator->GetRecorder() != VMA_NULL)
14072  {
14073  allocator->GetRecorder()->RecordAllocateMemoryForImage(
14074  allocator->GetCurrentFrameIndex(),
14075  vkMemReq,
14076  requiresDedicatedAllocation,
14077  prefersDedicatedAllocation,
14078  *pCreateInfo,
14079  *pAllocation);
14080  }
14081 #endif
14082 
14083  if(pAllocationInfo && result == VK_SUCCESS)
14084  {
14085  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14086  }
14087 
14088  return result;
14089 }
14090 
14091 void vmaFreeMemory(
14092  VmaAllocator allocator,
14093  VmaAllocation allocation)
14094 {
14095  VMA_ASSERT(allocator);
14096 
14097  if(allocation == VK_NULL_HANDLE)
14098  {
14099  return;
14100  }
14101 
14102  VMA_DEBUG_LOG("vmaFreeMemory");
14103 
14104  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14105 
14106 #if VMA_RECORDING_ENABLED
14107  if(allocator->GetRecorder() != VMA_NULL)
14108  {
14109  allocator->GetRecorder()->RecordFreeMemory(
14110  allocator->GetCurrentFrameIndex(),
14111  allocation);
14112  }
14113 #endif
14114 
14115  allocator->FreeMemory(allocation);
14116 }
14117 
14118 VkResult vmaResizeAllocation(
14119  VmaAllocator allocator,
14120  VmaAllocation allocation,
14121  VkDeviceSize newSize)
14122 {
14123  VMA_ASSERT(allocator && allocation);
14124 
14125  VMA_DEBUG_LOG("vmaResizeAllocation");
14126 
14127  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14128 
14129 #if VMA_RECORDING_ENABLED
14130  if(allocator->GetRecorder() != VMA_NULL)
14131  {
14132  allocator->GetRecorder()->RecordResizeAllocation(
14133  allocator->GetCurrentFrameIndex(),
14134  allocation,
14135  newSize);
14136  }
14137 #endif
14138 
14139  return allocator->ResizeAllocation(allocation, newSize);
14140 }
14141 
14142 void vmaGetAllocationInfo(
14143  VmaAllocator allocator,
14144  VmaAllocation allocation,
14145  VmaAllocationInfo* pAllocationInfo)
14146 {
14147  VMA_ASSERT(allocator && allocation && pAllocationInfo);
14148 
14149  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14150 
14151 #if VMA_RECORDING_ENABLED
14152  if(allocator->GetRecorder() != VMA_NULL)
14153  {
14154  allocator->GetRecorder()->RecordGetAllocationInfo(
14155  allocator->GetCurrentFrameIndex(),
14156  allocation);
14157  }
14158 #endif
14159 
14160  allocator->GetAllocationInfo(allocation, pAllocationInfo);
14161 }
14162 
14163 VkBool32 vmaTouchAllocation(
14164  VmaAllocator allocator,
14165  VmaAllocation allocation)
14166 {
14167  VMA_ASSERT(allocator && allocation);
14168 
14169  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14170 
14171 #if VMA_RECORDING_ENABLED
14172  if(allocator->GetRecorder() != VMA_NULL)
14173  {
14174  allocator->GetRecorder()->RecordTouchAllocation(
14175  allocator->GetCurrentFrameIndex(),
14176  allocation);
14177  }
14178 #endif
14179 
14180  return allocator->TouchAllocation(allocation);
14181 }
14182 
14183 void vmaSetAllocationUserData(
14184  VmaAllocator allocator,
14185  VmaAllocation allocation,
14186  void* pUserData)
14187 {
14188  VMA_ASSERT(allocator && allocation);
14189 
14190  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14191 
14192  allocation->SetUserData(allocator, pUserData);
14193 
14194 #if VMA_RECORDING_ENABLED
14195  if(allocator->GetRecorder() != VMA_NULL)
14196  {
14197  allocator->GetRecorder()->RecordSetAllocationUserData(
14198  allocator->GetCurrentFrameIndex(),
14199  allocation,
14200  pUserData);
14201  }
14202 #endif
14203 }
14204 
14205 void vmaCreateLostAllocation(
14206  VmaAllocator allocator,
14207  VmaAllocation* pAllocation)
14208 {
14209  VMA_ASSERT(allocator && pAllocation);
14210 
14211  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14212 
14213  allocator->CreateLostAllocation(pAllocation);
14214 
14215 #if VMA_RECORDING_ENABLED
14216  if(allocator->GetRecorder() != VMA_NULL)
14217  {
14218  allocator->GetRecorder()->RecordCreateLostAllocation(
14219  allocator->GetCurrentFrameIndex(),
14220  *pAllocation);
14221  }
14222 #endif
14223 }
14224 
14225 VkResult vmaMapMemory(
14226  VmaAllocator allocator,
14227  VmaAllocation allocation,
14228  void** ppData)
14229 {
14230  VMA_ASSERT(allocator && allocation && ppData);
14231 
14232  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14233 
14234  VkResult res = allocator->Map(allocation, ppData);
14235 
14236 #if VMA_RECORDING_ENABLED
14237  if(allocator->GetRecorder() != VMA_NULL)
14238  {
14239  allocator->GetRecorder()->RecordMapMemory(
14240  allocator->GetCurrentFrameIndex(),
14241  allocation);
14242  }
14243 #endif
14244 
14245  return res;
14246 }
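
// --- Illustrative sketch, not part of vk_mem_alloc.h: the canonical
// map / write / unmap sequence. Mapping is reference-counted, so nested
// Map/Unmap pairs on the same allocation are legal. Assumes <cstring> and a
// host-visible allocation at least `size` bytes large.
static VkResult ExampleUpload(
    VmaAllocator allocator, VmaAllocation allocation, const void* pSrc, size_t size)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }
    memcpy(pData, pSrc, size);
    vmaUnmapMemory(allocator, allocation);
    return VK_SUCCESS;
}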
14247 
14248 void vmaUnmapMemory(
14249  VmaAllocator allocator,
14250  VmaAllocation allocation)
14251 {
14252  VMA_ASSERT(allocator && allocation);
14253 
14254  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14255 
14256 #if VMA_RECORDING_ENABLED
14257  if(allocator->GetRecorder() != VMA_NULL)
14258  {
14259  allocator->GetRecorder()->RecordUnmapMemory(
14260  allocator->GetCurrentFrameIndex(),
14261  allocation);
14262  }
14263 #endif
14264 
14265  allocator->Unmap(allocation);
14266 }
14267 
14268 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14269 {
14270  VMA_ASSERT(allocator && allocation);
14271 
14272  VMA_DEBUG_LOG("vmaFlushAllocation");
14273 
14274  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14275 
14276  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
14277 
14278 #if VMA_RECORDING_ENABLED
14279  if(allocator->GetRecorder() != VMA_NULL)
14280  {
14281  allocator->GetRecorder()->RecordFlushAllocation(
14282  allocator->GetCurrentFrameIndex(),
14283  allocation, offset, size);
14284  }
14285 #endif
14286 }
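
// Behavioral note: the flush above is a no-op for HOST_COHERENT memory types,
// since FlushOrInvalidateAllocation() first checks IsMemoryTypeNonCoherent().
// A typical pattern after writing through a mapped pointer to non-coherent
// memory (sketch, assuming `pMapped` came from vmaMapMemory):
//
//   memcpy(pMapped, data, dataSize);
//   vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);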
14287 
14288 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14289 {
14290  VMA_ASSERT(allocator && allocation);
14291 
14292  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14293 
14294  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14295 
14296  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14297 
14298 #if VMA_RECORDING_ENABLED
14299  if(allocator->GetRecorder() != VMA_NULL)
14300  {
14301  allocator->GetRecorder()->RecordInvalidateAllocation(
14302  allocator->GetCurrentFrameIndex(),
14303  allocation, offset, size);
14304  }
14305 #endif
14306 }
14307 
14308 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14309 {
14310  VMA_ASSERT(allocator);
14311 
14312  VMA_DEBUG_LOG("vmaCheckCorruption");
14313 
14314  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14315 
14316  return allocator->CheckCorruption(memoryTypeBits);
14317 }
14318 
14319 VkResult vmaDefragment(
14320  VmaAllocator allocator,
14321  VmaAllocation* pAllocations,
14322  size_t allocationCount,
14323  VkBool32* pAllocationsChanged,
14324  const VmaDefragmentationInfo *pDefragmentationInfo,
14325  VmaDefragmentationStats* pDefragmentationStats)
14326 {
14327  VMA_ASSERT(allocator && pAllocations);
14328 
14329  VMA_DEBUG_LOG("vmaDefragment");
14330 
14331  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14332 
14333  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14334 }
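
// --- Illustrative sketch, not part of vk_mem_alloc.h: driving the defragment
// call above. `allocationsChanged` flags which allocations moved; their buffers
// or images must be re-created and re-bound (e.g. via vmaBindBufferMemory)
// afterwards.
static VkResult ExampleDefragment(
    VmaAllocator allocator, VmaAllocation* allocations,
    VkBool32* allocationsChanged, size_t count)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // no byte limit
    defragInfo.maxAllocationsToMove = UINT32_MAX; // no count limit

    VmaDefragmentationStats stats = {};
    return vmaDefragment(allocator, allocations, count,
        allocationsChanged, &defragInfo, &stats);
}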
14335 
14336 VkResult vmaBindBufferMemory(
14337  VmaAllocator allocator,
14338  VmaAllocation allocation,
14339  VkBuffer buffer)
14340 {
14341  VMA_ASSERT(allocator && allocation && buffer);
14342 
14343  VMA_DEBUG_LOG("vmaBindBufferMemory");
14344 
14345  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14346 
14347  return allocator->BindBufferMemory(allocation, buffer);
14348 }
14349 
14350 VkResult vmaBindImageMemory(
14351  VmaAllocator allocator,
14352  VmaAllocation allocation,
14353  VkImage image)
14354 {
14355  VMA_ASSERT(allocator && allocation && image);
14356 
14357  VMA_DEBUG_LOG("vmaBindImageMemory");
14358 
14359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14360 
14361  return allocator->BindImageMemory(allocation, image);
14362 }
14363 
14364 VkResult vmaCreateBuffer(
14365  VmaAllocator allocator,
14366  const VkBufferCreateInfo* pBufferCreateInfo,
14367  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14368  VkBuffer* pBuffer,
14369  VmaAllocation* pAllocation,
14370  VmaAllocationInfo* pAllocationInfo)
14371 {
14372  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14373 
14374  if(pBufferCreateInfo->size == 0)
14375  {
14376  return VK_ERROR_VALIDATION_FAILED_EXT;
14377  }
14378 
14379  VMA_DEBUG_LOG("vmaCreateBuffer");
14380 
14381  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14382 
14383  *pBuffer = VK_NULL_HANDLE;
14384  *pAllocation = VK_NULL_HANDLE;
14385 
14386  // 1. Create VkBuffer.
14387  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14388  allocator->m_hDevice,
14389  pBufferCreateInfo,
14390  allocator->GetAllocationCallbacks(),
14391  pBuffer);
14392  if(res >= 0)
14393  {
14394  // 2. Query memory requirements (vkGetBufferMemoryRequirements).
14395  VkMemoryRequirements vkMemReq = {};
14396  bool requiresDedicatedAllocation = false;
14397  bool prefersDedicatedAllocation = false;
14398  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14399  requiresDedicatedAllocation, prefersDedicatedAllocation);
14400 
14401  // Make sure the alignment required for specific buffer usages (reported in
14402  // VkPhysicalDeviceLimits) is covered by the alignment from the memory requirements.
14403  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14404  {
14405  VMA_ASSERT(vkMemReq.alignment %
14406  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14407  }
14408  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14409  {
14410  VMA_ASSERT(vkMemReq.alignment %
14411  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14412  }
14413  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14414  {
14415  VMA_ASSERT(vkMemReq.alignment %
14416  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14417  }
14418 
14419  // 3. Allocate memory using allocator.
14420  res = allocator->AllocateMemory(
14421  vkMemReq,
14422  requiresDedicatedAllocation,
14423  prefersDedicatedAllocation,
14424  *pBuffer, // dedicatedBuffer
14425  VK_NULL_HANDLE, // dedicatedImage
14426  *pAllocationCreateInfo,
14427  VMA_SUBALLOCATION_TYPE_BUFFER,
14428  pAllocation);
14429 
14430 #if VMA_RECORDING_ENABLED
14431  if(allocator->GetRecorder() != VMA_NULL)
14432  {
14433  allocator->GetRecorder()->RecordCreateBuffer(
14434  allocator->GetCurrentFrameIndex(),
14435  *pBufferCreateInfo,
14436  *pAllocationCreateInfo,
14437  *pAllocation);
14438  }
14439 #endif
14440 
14441  if(res >= 0)
14442  {
14443  // 4. Bind buffer to memory.
14444  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14445  if(res >= 0)
14446  {
14447  // All steps succeeded.
14448  #if VMA_STATS_STRING_ENABLED
14449  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14450  #endif
14451  if(pAllocationInfo != VMA_NULL)
14452  {
14453  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14454  }
14455 
14456  return VK_SUCCESS;
14457  }
14458  allocator->FreeMemory(*pAllocation);
14459  *pAllocation = VK_NULL_HANDLE;
14460  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14461  *pBuffer = VK_NULL_HANDLE;
14462  return res;
14463  }
14464  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14465  *pBuffer = VK_NULL_HANDLE;
14466  return res;
14467  }
14468  return res;
14469 }
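
// --- Illustrative sketch, not part of vk_mem_alloc.h: the typical one-call path
// that vmaCreateBuffer() implements (create + allocate + bind). On its failure
// paths the function resets *pBuffer and *pAllocation to VK_NULL_HANDLE, so the
// caller needs no partial cleanup.
static VkResult ExampleCreateVertexBuffer(
    VmaAllocator allocator, VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
        pBuffer, pAllocation, VMA_NULL); // pAllocationInfo not needed here
}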
14470 
14471 void vmaDestroyBuffer(
14472  VmaAllocator allocator,
14473  VkBuffer buffer,
14474  VmaAllocation allocation)
14475 {
14476  VMA_ASSERT(allocator);
14477 
14478  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14479  {
14480  return;
14481  }
14482 
14483  VMA_DEBUG_LOG("vmaDestroyBuffer");
14484 
14485  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14486 
14487 #if VMA_RECORDING_ENABLED
14488  if(allocator->GetRecorder() != VMA_NULL)
14489  {
14490  allocator->GetRecorder()->RecordDestroyBuffer(
14491  allocator->GetCurrentFrameIndex(),
14492  allocation);
14493  }
14494 #endif
14495 
14496  if(buffer != VK_NULL_HANDLE)
14497  {
14498  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14499  }
14500 
14501  if(allocation != VK_NULL_HANDLE)
14502  {
14503  allocator->FreeMemory(allocation);
14504  }
14505 }
14506 
14507 VkResult vmaCreateImage(
14508  VmaAllocator allocator,
14509  const VkImageCreateInfo* pImageCreateInfo,
14510  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14511  VkImage* pImage,
14512  VmaAllocation* pAllocation,
14513  VmaAllocationInfo* pAllocationInfo)
14514 {
14515  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14516 
14517  if(pImageCreateInfo->extent.width == 0 ||
14518  pImageCreateInfo->extent.height == 0 ||
14519  pImageCreateInfo->extent.depth == 0 ||
14520  pImageCreateInfo->mipLevels == 0 ||
14521  pImageCreateInfo->arrayLayers == 0)
14522  {
14523  return VK_ERROR_VALIDATION_FAILED_EXT;
14524  }
14525 
14526  VMA_DEBUG_LOG("vmaCreateImage");
14527 
14528  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14529 
14530  *pImage = VK_NULL_HANDLE;
14531  *pAllocation = VK_NULL_HANDLE;
14532 
14533  // 1. Create VkImage.
14534  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14535  allocator->m_hDevice,
14536  pImageCreateInfo,
14537  allocator->GetAllocationCallbacks(),
14538  pImage);
14539  if(res >= 0)
14540  {
14541  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14542  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14543  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14544 
14545  // 2. Allocate memory using allocator.
14546  VkMemoryRequirements vkMemReq = {};
14547  bool requiresDedicatedAllocation = false;
14548  bool prefersDedicatedAllocation = false;
14549  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14550  requiresDedicatedAllocation, prefersDedicatedAllocation);
14551 
14552  res = allocator->AllocateMemory(
14553  vkMemReq,
14554  requiresDedicatedAllocation,
14555  prefersDedicatedAllocation,
14556  VK_NULL_HANDLE, // dedicatedBuffer
14557  *pImage, // dedicatedImage
14558  *pAllocationCreateInfo,
14559  suballocType,
14560  pAllocation);
14561 
14562 #if VMA_RECORDING_ENABLED
14563  if(allocator->GetRecorder() != VMA_NULL)
14564  {
14565  allocator->GetRecorder()->RecordCreateImage(
14566  allocator->GetCurrentFrameIndex(),
14567  *pImageCreateInfo,
14568  *pAllocationCreateInfo,
14569  *pAllocation);
14570  }
14571 #endif
14572 
14573  if(res >= 0)
14574  {
14575  // 3. Bind image to memory.
14576  res = allocator->BindImageMemory(*pAllocation, *pImage);
14577  if(res >= 0)
14578  {
14579  // All steps succeeded.
14580  #if VMA_STATS_STRING_ENABLED
14581  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14582  #endif
14583  if(pAllocationInfo != VMA_NULL)
14584  {
14585  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14586  }
14587 
14588  return VK_SUCCESS;
14589  }
14590  allocator->FreeMemory(*pAllocation);
14591  *pAllocation = VK_NULL_HANDLE;
14592  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14593  *pImage = VK_NULL_HANDLE;
14594  return res;
14595  }
14596  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14597  *pImage = VK_NULL_HANDLE;
14598  return res;
14599  }
14600  return res;
14601 }
14602 
14603 void vmaDestroyImage(
14604  VmaAllocator allocator,
14605  VkImage image,
14606  VmaAllocation allocation)
14607 {
14608  VMA_ASSERT(allocator);
14609 
14610  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14611  {
14612  return;
14613  }
14614 
14615  VMA_DEBUG_LOG("vmaDestroyImage");
14616 
14617  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14618 
14619 #if VMA_RECORDING_ENABLED
14620  if(allocator->GetRecorder() != VMA_NULL)
14621  {
14622  allocator->GetRecorder()->RecordDestroyImage(
14623  allocator->GetCurrentFrameIndex(),
14624  allocation);
14625  }
14626 #endif
14627 
14628  if(image != VK_NULL_HANDLE)
14629  {
14630  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14631  }
14632  if(allocation != VK_NULL_HANDLE)
14633  {
14634  allocator->FreeMemory(allocation);
14635  }
14636 }
14637 
14638 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1586
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1887
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1643
@@ -73,26 +73,26 @@ $(function() {
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1617
-
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2209
+
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2212
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1598
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1844
Definition: vk_mem_alloc.h:1947
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1590
-
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2309
+
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2312
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1640
-
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2579
-
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2098
+
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2582
+
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2101
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1487
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
-
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2190
+
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2193
Definition: vk_mem_alloc.h:1924
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1579
-
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1997
+
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2000
Definition: vk_mem_alloc.h:1871
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1652
-
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2126
+
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2129
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1705
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1637
@@ -102,41 +102,41 @@ $(function() {
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1777
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1595
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1776
-
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2583
+
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2586
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1669
VmaStatInfo total
Definition: vk_mem_alloc.h:1786
-
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2591
-
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1981
-
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2574
+
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2594
+
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1984
+
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2577
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1596
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1521
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1646
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
-
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2140
-
Definition: vk_mem_alloc.h:2134
+
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2143
+
Definition: vk_mem_alloc.h:2137
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1712
-
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2319
+
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2322
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1591
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1615
-
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2018
-
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2160
-
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2196
+
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2021
+
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2163
+
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2199
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1577
-
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2143
+
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2146
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1822
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
-
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2569
+
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2572
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
-
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2587
+
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2590
Definition: vk_mem_alloc.h:1861
-
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2005
+
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2008
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1594
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
@@ -144,43 +144,43 @@ $(function() {
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1782
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1527
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
- +
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1548
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1619
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1553
-
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2589
+
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2592
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
-
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1992
-
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2206
+
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1995
+
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2209
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1587
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1765
-
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2155
+
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2158
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1540
-
Definition: vk_mem_alloc.h:2130
+
Definition: vk_mem_alloc.h:2133
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1931
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1778
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1544
-
Definition: vk_mem_alloc.h:1955
-
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2146
+
Definition: vk_mem_alloc.h:1958
+
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2149
Definition: vk_mem_alloc.h:1870
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1593
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
-
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1987
-
Definition: vk_mem_alloc.h:1978
+
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1990
+
Definition: vk_mem_alloc.h:1981
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1768
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1589
-
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2168
+
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2171
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1655
-
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2199
-
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1976
-
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2011
+
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2202
+
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1979
+
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2014
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1693
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1784
@@ -192,62 +192,62 @@ $(function() {
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1542
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1599
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
-uint32_t frameInUseCount  Maximum number of additional frames that are in use at the same time as current frame.  Definition: vk_mem_alloc.h:2182
+uint32_t frameInUseCount  Maximum number of additional frames that are in use at the same time as current frame.  Definition: vk_mem_alloc.h:2185
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1592
Definition: vk_mem_alloc.h:1942
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
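A sketch; unlike vmaCreateImage(), the image is created separately and the caller binds the memory, using deviceMemory and offset from the returned VmaAllocationInfo:

    VkImage img;
    VkResult res = vkCreateImage(device, &imgCreateInfo, NULL, &img);

    VmaAllocationCreateInfo allocCreateInfo = { 0 };
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    res = vmaAllocateMemoryForImage(allocator, img, &allocCreateInfo, &alloc, &allocInfo);
    res = vkBindImageMemory(device, img, allocInfo.deviceMemory, allocInfo.offset);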
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1633
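A sketch of wiring these settings into allocator creation; the file path is hypothetical, and recording only works when VMA_RECORDING_ENABLED is 1:

    VmaRecordSettings recordSettings = { 0 };
    recordSettings.flags = 0; // see VmaRecordFlagBits
    recordSettings.pFilePath = "vma_recording.csv"; // hypothetical path

    VmaAllocatorCreateInfo allocatorInfo = { 0 };
    /* ...physicalDevice, device, etc... */
    allocatorInfo.pRecordSettings = &recordSettings;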
-void * pUserData  Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...  Definition: vk_mem_alloc.h:2333
+void * pUserData  Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...  Definition: vk_mem_alloc.h:2336
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1649
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1777
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1774
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
-Describes parameter of existing VmaPool.  Definition: vk_mem_alloc.h:2187
+Describes parameter of existing VmaPool.  Definition: vk_mem_alloc.h:2190
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
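A sketch of both corruption checks; they assume the library was compiled with corruption detection enabled (the VMA_DEBUG_MARGIN / VMA_DEBUG_DETECT_CORRUPTION macros):

    // Check allocations in every memory type:
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);

    // Or restrict the check to one custom pool:
    res = vmaCheckPoolCorruption(allocator, pool);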
Definition: vk_mem_alloc.h:1951
-VkDeviceSize offset  Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.  Definition: vk_mem_alloc.h:2314
-Definition: vk_mem_alloc.h:1962
-Definition: vk_mem_alloc.h:1974
-VkDeviceSize bytesMoved  Total number of bytes that have been copied while moving allocations to different places...  Definition: vk_mem_alloc.h:2585
+VkDeviceSize offset  Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.  Definition: vk_mem_alloc.h:2317
+Definition: vk_mem_alloc.h:1965
+Definition: vk_mem_alloc.h:1977
+VkDeviceSize bytesMoved  Total number of bytes that have been copied while moving allocations to different places...  Definition: vk_mem_alloc.h:2588
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1585
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
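A minimal sketch; physicalDevice and device are the application's existing Vulkan handles:

    VmaAllocatorCreateInfo allocatorInfo = { 0 };
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // Torn down at shutdown with vmaDestroyAllocator(allocator);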
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1772
Definition: vk_mem_alloc.h:1827
-VkFlags VmaPoolCreateFlags  Definition: vk_mem_alloc.h:2136
+VkFlags VmaPoolCreateFlags  Definition: vk_mem_alloc.h:2139
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1622
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1770
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1597
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1601
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1898
-Definition: vk_mem_alloc.h:1969
+Definition: vk_mem_alloc.h:1972
Definition: vk_mem_alloc.h:1854
-void * pMappedData  Pointer to the beginning of this allocation as mapped data.  Definition: vk_mem_alloc.h:2328
+void * pMappedData  Pointer to the beginning of this allocation as mapped data.  Definition: vk_mem_alloc.h:2331
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1575
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
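A sketch of how these statistics are typically obtained; allocs/allocCount stand for a hypothetical array of HOST_VISIBLE allocations not currently used by the GPU, and the exact vmaDefragment() parameter list should be checked against this header version:

    VmaDefragmentationStats stats = { 0 };
    VkResult res = vmaDefragment(allocator, allocs, allocCount, NULL, NULL, &stats);
    // stats.bytesMoved reports how many bytes the call copied while compacting.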
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1588
-Enables alternative, linear allocation algorithm in this pool.  Definition: vk_mem_alloc.h:2115
+Enables alternative, linear allocation algorithm in this pool.  Definition: vk_mem_alloc.h:2118
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Tries to resize an allocation in place, if there is enough free memory after it.
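A sketch; newSize is hypothetical:

    VkResult res = vmaResizeAllocation(allocator, alloc, newSize);
    if (res != VK_SUCCESS)
    {
        // No free space directly after the allocation; fall back to
        // allocating a new block and copying the contents over.
    }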
-Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().  Definition: vk_mem_alloc.h:2295
+Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().  Definition: vk_mem_alloc.h:2298
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
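A sketch of the general-purpose path, with requirements queried from an existing buffer:

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buf, &memReq); // buf from vkCreateBuffer()

    VmaAllocationCreateInfo allocCreateInfo = { 0 };
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, NULL);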
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
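A sketch of the intended call site, once per frame:

    uint32_t frameIndex = 0;
    for (;;) // hypothetical render loop
    {
        vmaSetCurrentFrameIndex(allocator, frameIndex);
        // ...record and submit the frame...
        ++frameIndex;
    }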
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
-Definition: vk_mem_alloc.h:1959
-VmaPoolCreateFlagBits  Flags to be passed as VmaPoolCreateInfo::flags.  Definition: vk_mem_alloc.h:2080
+Definition: vk_mem_alloc.h:1962
+VmaPoolCreateFlagBits  Flags to be passed as VmaPoolCreateInfo::flags.  Definition: vk_mem_alloc.h:2083
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1778
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
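A sketch; this matters for allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT (declared elsewhere in this header):

    if (vmaTouchAllocation(allocator, alloc) == VK_TRUE)
    {
        // Still valid, and now marked as used in the current frame.
    }
    else
    {
        // The allocation became lost; the resource must be recreated.
    }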
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1609
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1785
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
-VkDeviceSize unusedSize  Total number of bytes in the pool not used by any VmaAllocation.  Definition: vk_mem_alloc.h:2193
+VkDeviceSize unusedSize  Total number of bytes in the pool not used by any VmaAllocation.  Definition: vk_mem_alloc.h:2196
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1778
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
-uint32_t memoryType  Memory type index that this allocation was allocated from.  Definition: vk_mem_alloc.h:2300
+uint32_t memoryType  Memory type index that this allocation was allocated from.  Definition: vk_mem_alloc.h:2303