From c9f948243c83ea452096fed9caa05bbca123e300 Mon Sep 17 00:00:00 2001
From: Adam Sawicki
Date: Wed, 17 Apr 2019 13:33:27 +0200
Subject: [PATCH] change documentation to allow defragmentation of only buffers not images #59
---
 docs/html/defragmentation.html          |   8 +-
 docs/html/vk__mem__alloc_8h_source.html | 294 ++++++++++++------------
 src/vk_mem_alloc.h                      |  14 +-
 3 files changed, 149 insertions(+), 167 deletions(-)

diff --git a/docs/html/defragmentation.html b/docs/html/defragmentation.html
index 090a6c3..3b30af8 100644
--- a/docs/html/defragmentation.html
+++ b/docs/html/defragmentation.html
@@ -103,13 +103,7 @@ Defragmenting GPU memory

You may try not to block your entire program while waiting for defragmentation to finish, and instead run it in the background, as long as you carefully fulfill the requirements described in function vmaDefragmentationBegin().

Additional notes

-It is only legal to defragment allocations bound to:
-
-Defragmentation of images created with VK_IMAGE_TILING_OPTIMAL or in any other layout may give undefined results.
-
-If you defragment allocations bound to images, new images to be bound to new memory region after defragmentation should be created with VK_IMAGE_LAYOUT_PREINITIALIZED and then transitioned to their original layout from before defragmentation if needed using an image memory barrier.
+
+It is only legal to defragment allocations bound to buffers, not images!

While using defragmentation, you may experience validation layer warnings, which you just need to ignore. See Validation layer warnings.

Please don't expect memory to be fully compacted after defragmentation. The algorithms inside are based on heuristics that try to maximize the number of Vulkan memory blocks that become totally empty (so they can be released), as well as to maximize the continuous empty space inside the remaining blocks, while minimizing the number and size of allocations that need to be moved. Some fragmentation may still remain - this is normal.
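A minimal sketch of such buffer-only defragmentation, using vmaDefragmentationBegin() / vmaDefragmentationEnd(), may look like the following. The allocs array, its count, and the buffer recreation step are application-specific and shown only for illustration; members not visible in this listing (such as maxCpuAllocationsToMove) follow the full declaration of VmaDefragmentationInfo2 in vk_mem_alloc.h.

    // Collect VmaAllocation handles of buffers you are willing to move,
    // destroy the old VkBuffer objects, then:
    std::vector<VkBool32> changed(allocCount);

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.pAllocationsChanged = changed.data();
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    defragInfo.commandBuffer = VK_NULL_HANDLE; // CPU-side moves only

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    if(res >= 0)
    {
        vmaDefragmentationEnd(allocator, defragCtx);
    }

    // For each i where changed[i] == VK_TRUE: create a new VkBuffer, bind it
    // with vmaBindBufferMemory(), and upload its data again.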

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index 57612c9..ef48205 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -65,212 +65,212 @@ $(function() {
vk_mem_alloc.h
-Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1647 /*
1648 Define this macro to 0/1 to disable/enable support for recording functionality,
1649 available through VmaAllocatorCreateInfo::pRecordSettings.
1650 */
1651 #ifndef VMA_RECORDING_ENABLED
1652  #ifdef _WIN32
1653  #define VMA_RECORDING_ENABLED 1
1654  #else
1655  #define VMA_RECORDING_ENABLED 0
1656  #endif
1657 #endif
1658 
1659 #ifndef NOMINMAX
1660  #define NOMINMAX // For windows.h
1661 #endif
1662 
1663 #ifndef VULKAN_H_
1664  #include <vulkan/vulkan.h>
1665 #endif
1666 
1667 #if VMA_RECORDING_ENABLED
1668  #include <windows.h>
1669 #endif
1670 
1671 #if !defined(VMA_DEDICATED_ALLOCATION)
1672  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1673  #define VMA_DEDICATED_ALLOCATION 1
1674  #else
1675  #define VMA_DEDICATED_ALLOCATION 0
1676  #endif
1677 #endif
1678 
1688 VK_DEFINE_HANDLE(VmaAllocator)
1689 
1690 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1692  VmaAllocator allocator,
1693  uint32_t memoryType,
1694  VkDeviceMemory memory,
1695  VkDeviceSize size);
1697 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1698  VmaAllocator allocator,
1699  uint32_t memoryType,
1700  VkDeviceMemory memory,
1701  VkDeviceSize size);
1702 
1716 
1746 
1749 typedef VkFlags VmaAllocatorCreateFlags;
1750 
1755 typedef struct VmaVulkanFunctions {
1756  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1757  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1758  PFN_vkAllocateMemory vkAllocateMemory;
1759  PFN_vkFreeMemory vkFreeMemory;
1760  PFN_vkMapMemory vkMapMemory;
1761  PFN_vkUnmapMemory vkUnmapMemory;
1762  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1763  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1764  PFN_vkBindBufferMemory vkBindBufferMemory;
1765  PFN_vkBindImageMemory vkBindImageMemory;
1766  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1767  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1768  PFN_vkCreateBuffer vkCreateBuffer;
1769  PFN_vkDestroyBuffer vkDestroyBuffer;
1770  PFN_vkCreateImage vkCreateImage;
1771  PFN_vkDestroyImage vkDestroyImage;
1772  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1773 #if VMA_DEDICATED_ALLOCATION
1774  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1775  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1776 #endif
1777 } VmaVulkanFunctions;
1778 
1780 typedef enum VmaRecordFlagBits {
1787 
1790 typedef VkFlags VmaRecordFlags;
1791 
1793 typedef struct VmaRecordSettings
1794 {
1804  const char* pFilePath;
1805 } VmaRecordSettings;
1806 
1808 typedef struct VmaAllocatorCreateInfo
1809 {
1813 
1814  VkPhysicalDevice physicalDevice;
1816 
1817  VkDevice device;
1819 
1822 
1823  const VkAllocationCallbacks* pAllocationCallbacks;
1825 
1865  const VkDeviceSize* pHeapSizeLimit;
1886 
1888 VkResult vmaCreateAllocator(
1889  const VmaAllocatorCreateInfo* pCreateInfo,
1890  VmaAllocator* pAllocator);
1891 
1893 void vmaDestroyAllocator(
1894  VmaAllocator allocator);
1895 
1900 void vmaGetPhysicalDeviceProperties(
1901  VmaAllocator allocator,
1902  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1903 
1908 void vmaGetMemoryProperties(
1909  VmaAllocator allocator,
1910  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1911 
1918 void vmaGetMemoryTypeProperties(
1919  VmaAllocator allocator,
1920  uint32_t memoryTypeIndex,
1921  VkMemoryPropertyFlags* pFlags);
1922 
1931 void vmaSetCurrentFrameIndex(
1932  VmaAllocator allocator,
1933  uint32_t frameIndex);
1934 
1937 typedef struct VmaStatInfo
1938 {
1940  uint32_t blockCount;
1946  VkDeviceSize usedBytes;
1948  VkDeviceSize unusedBytes;
1951 } VmaStatInfo;
1952 
1954 typedef struct VmaStats
1955 {
1956  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1957  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1959 } VmaStats;
1960 
1962 void vmaCalculateStats(
1963  VmaAllocator allocator,
1964  VmaStats* pStats);
1965 
1966 #ifndef VMA_STATS_STRING_ENABLED
1967 #define VMA_STATS_STRING_ENABLED 1
1968 #endif
1969 
1970 #if VMA_STATS_STRING_ENABLED
1971 
1973 
1975 void vmaBuildStatsString(
1976  VmaAllocator allocator,
1977  char** ppStatsString,
1978  VkBool32 detailedMap);
1979 
1980 void vmaFreeStatsString(
1981  VmaAllocator allocator,
1982  char* pStatsString);
1983 
1984 #endif // #if VMA_STATS_STRING_ENABLED
1985 
1994 VK_DEFINE_HANDLE(VmaPool)
1995 
1996 typedef enum VmaMemoryUsage
1997 {
2046 } VmaMemoryUsage;
2047 
2057 
2118 
2134 
2144 
2151 
2155 
2156 typedef struct VmaAllocationCreateInfo
2157 {
2170  VkMemoryPropertyFlags requiredFlags;
2175  VkMemoryPropertyFlags preferredFlags;
2183  uint32_t memoryTypeBits;
2196  void* pUserData;
2197 } VmaAllocationCreateInfo;
2198 
2215 VkResult vmaFindMemoryTypeIndex(
2216  VmaAllocator allocator,
2217  uint32_t memoryTypeBits,
2218  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2219  uint32_t* pMemoryTypeIndex);
2220 
2233 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2234  VmaAllocator allocator,
2235  const VkBufferCreateInfo* pBufferCreateInfo,
2236  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2237  uint32_t* pMemoryTypeIndex);
2238 
2251 VkResult vmaFindMemoryTypeIndexForImageInfo(
2252  VmaAllocator allocator,
2253  const VkImageCreateInfo* pImageCreateInfo,
2254  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2255  uint32_t* pMemoryTypeIndex);
2256 
2277 
2294 
2305 
2311 
2314 typedef VkFlags VmaPoolCreateFlags;
2315 
2318 typedef struct VmaPoolCreateInfo {
2333  VkDeviceSize blockSize;
2362 
2365 typedef struct VmaPoolStats {
2368  VkDeviceSize size;
2371  VkDeviceSize unusedSize;
2384  VkDeviceSize unusedRangeSizeMax;
2387  size_t blockCount;
2388 } VmaPoolStats;
2389 
2396 VkResult vmaCreatePool(
2397  VmaAllocator allocator,
2398  const VmaPoolCreateInfo* pCreateInfo,
2399  VmaPool* pPool);
2400 
2403 void vmaDestroyPool(
2404  VmaAllocator allocator,
2405  VmaPool pool);
2406 
2413 void vmaGetPoolStats(
2414  VmaAllocator allocator,
2415  VmaPool pool,
2416  VmaPoolStats* pPoolStats);
2417 
2424 void vmaMakePoolAllocationsLost(
2425  VmaAllocator allocator,
2426  VmaPool pool,
2427  size_t* pLostAllocationCount);
2428 
2443 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2444 
2469 VK_DEFINE_HANDLE(VmaAllocation)
2470 
2471 
2473 typedef struct VmaAllocationInfo {
2478  uint32_t memoryType;
2487  VkDeviceMemory deviceMemory;
2492  VkDeviceSize offset;
2497  VkDeviceSize size;
2511  void* pUserData;
2512 } VmaAllocationInfo;
2513 
2524 VkResult vmaAllocateMemory(
2525  VmaAllocator allocator,
2526  const VkMemoryRequirements* pVkMemoryRequirements,
2527  const VmaAllocationCreateInfo* pCreateInfo,
2528  VmaAllocation* pAllocation,
2529  VmaAllocationInfo* pAllocationInfo);
2530 
2550 VkResult vmaAllocateMemoryPages(
2551  VmaAllocator allocator,
2552  const VkMemoryRequirements* pVkMemoryRequirements,
2553  const VmaAllocationCreateInfo* pCreateInfo,
2554  size_t allocationCount,
2555  VmaAllocation* pAllocations,
2556  VmaAllocationInfo* pAllocationInfo);
2557 
2564 VkResult vmaAllocateMemoryForBuffer(
2565  VmaAllocator allocator,
2566  VkBuffer buffer,
2567  const VmaAllocationCreateInfo* pCreateInfo,
2568  VmaAllocation* pAllocation,
2569  VmaAllocationInfo* pAllocationInfo);
2570 
2572 VkResult vmaAllocateMemoryForImage(
2573  VmaAllocator allocator,
2574  VkImage image,
2575  const VmaAllocationCreateInfo* pCreateInfo,
2576  VmaAllocation* pAllocation,
2577  VmaAllocationInfo* pAllocationInfo);
2578 
2583 void vmaFreeMemory(
2584  VmaAllocator allocator,
2585  VmaAllocation allocation);
2586 
2597 void vmaFreeMemoryPages(
2598  VmaAllocator allocator,
2599  size_t allocationCount,
2600  VmaAllocation* pAllocations);
2601 
2622 VkResult vmaResizeAllocation(
2623  VmaAllocator allocator,
2624  VmaAllocation allocation,
2625  VkDeviceSize newSize);
2626 
2643 void vmaGetAllocationInfo(
2644  VmaAllocator allocator,
2645  VmaAllocation allocation,
2646  VmaAllocationInfo* pAllocationInfo);
2647 
2662 VkBool32 vmaTouchAllocation(
2663  VmaAllocator allocator,
2664  VmaAllocation allocation);
2665 
2679 void vmaSetAllocationUserData(
2680  VmaAllocator allocator,
2681  VmaAllocation allocation,
2682  void* pUserData);
2683 
2694 void vmaCreateLostAllocation(
2695  VmaAllocator allocator,
2696  VmaAllocation* pAllocation);
2697 
2732 VkResult vmaMapMemory(
2733  VmaAllocator allocator,
2734  VmaAllocation allocation,
2735  void** ppData);
2736 
2741 void vmaUnmapMemory(
2742  VmaAllocator allocator,
2743  VmaAllocation allocation);
2744 
2761 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2762 
2779 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2780 
2797 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2798 
2805 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2806 
2807 typedef enum VmaDefragmentationFlagBits {
2811 typedef VkFlags VmaDefragmentationFlags;
2812 
2817 typedef struct VmaDefragmentationInfo2 {
2841  uint32_t poolCount;
2862  VkDeviceSize maxCpuBytesToMove;
2872  VkDeviceSize maxGpuBytesToMove;
2886  VkCommandBuffer commandBuffer;
2887 } VmaDefragmentationInfo2;
2888 
2893 typedef struct VmaDefragmentationInfo {
2898  VkDeviceSize maxBytesToMove;
2905 
2907 typedef struct VmaDefragmentationStats {
2909  VkDeviceSize bytesMoved;
2911  VkDeviceSize bytesFreed;
2917 
2947 VkResult vmaDefragmentationBegin(
2948  VmaAllocator allocator,
2949  const VmaDefragmentationInfo2* pInfo,
2950  VmaDefragmentationStats* pStats,
2951  VmaDefragmentationContext *pContext);
2952 
2958 VkResult vmaDefragmentationEnd(
2959  VmaAllocator allocator,
2960  VmaDefragmentationContext context);
2961 
3002 VkResult vmaDefragment(
3003  VmaAllocator allocator,
3004  VmaAllocation* pAllocations,
3005  size_t allocationCount,
3006  VkBool32* pAllocationsChanged,
3007  const VmaDefragmentationInfo *pDefragmentationInfo,
3008  VmaDefragmentationStats* pDefragmentationStats);
3009 
3022 VkResult vmaBindBufferMemory(
3023  VmaAllocator allocator,
3024  VmaAllocation allocation,
3025  VkBuffer buffer);
3026 
3039 VkResult vmaBindImageMemory(
3040  VmaAllocator allocator,
3041  VmaAllocation allocation,
3042  VkImage image);
3043 
3070 VkResult vmaCreateBuffer(
3071  VmaAllocator allocator,
3072  const VkBufferCreateInfo* pBufferCreateInfo,
3073  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3074  VkBuffer* pBuffer,
3075  VmaAllocation* pAllocation,
3076  VmaAllocationInfo* pAllocationInfo);
3077 
3089 void vmaDestroyBuffer(
3090  VmaAllocator allocator,
3091  VkBuffer buffer,
3092  VmaAllocation allocation);
3093 
3095 VkResult vmaCreateImage(
3096  VmaAllocator allocator,
3097  const VkImageCreateInfo* pImageCreateInfo,
3098  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3099  VkImage* pImage,
3100  VmaAllocation* pAllocation,
3101  VmaAllocationInfo* pAllocationInfo);
3102 
3114 void vmaDestroyImage(
3115  VmaAllocator allocator,
3116  VkImage image,
3117  VmaAllocation allocation);
3118 
3119 #ifdef __cplusplus
3120 }
3121 #endif
3122 
3123 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3124 
3125 // For Visual Studio IntelliSense.
3126 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3127 #define VMA_IMPLEMENTATION
3128 #endif
3129 
3130 #ifdef VMA_IMPLEMENTATION
3131 #undef VMA_IMPLEMENTATION
3132 
3133 #include <cstdint>
3134 #include <cstdlib>
3135 #include <cstring>
3136 
3137 /*******************************************************************************
3138 CONFIGURATION SECTION
3139 
3140 Define some of these macros before each #include of this header or change them
3141 here if you need behavior other than the default, depending on your environment.
3142 */
3143 
3144 /*
3145 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3146 internally, like:
3147 
3148  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3149 
3150 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3151 VmaAllocatorCreateInfo::pVulkanFunctions.
3152 */
3153 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3154 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3155 #endif
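
// Usage sketch (illustrative, kept in a comment; the device handles come from
// the application, and only a few VmaVulkanFunctions members are shown):
//
//    VmaVulkanFunctions vulkanFunctions = {};
//    vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
//    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
//    // ... assign the remaining members of VmaVulkanFunctions ...
//
//    VmaAllocatorCreateInfo allocatorInfo = {};
//    allocatorInfo.physicalDevice = physicalDevice;
//    allocatorInfo.device = device;
//    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
//
//    VmaAllocator allocator;
//    vmaCreateAllocator(&allocatorInfo, &allocator);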
3156 
3157 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3158 //#define VMA_USE_STL_CONTAINERS 1
3159 
3160 /* Set this macro to 1 to make the library include and use STL containers:
3161 std::pair, std::vector, std::list, std::unordered_map.
3162 
3163 Set it to 0 or leave it undefined to make the library use its own implementation of
3164 the containers.
3165 */
3166 #if VMA_USE_STL_CONTAINERS
3167  #define VMA_USE_STL_VECTOR 1
3168  #define VMA_USE_STL_UNORDERED_MAP 1
3169  #define VMA_USE_STL_LIST 1
3170 #endif
3171 
3172 #ifndef VMA_USE_STL_SHARED_MUTEX
3173  // Compiler conforms to C++17.
3174  #if __cplusplus >= 201703L
3175  #define VMA_USE_STL_SHARED_MUTEX 1
3176  // Visual Studio defines __cplusplus properly only when passed the additional parameter: /Zc:__cplusplus
3177  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
3178  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3179  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3180  #define VMA_USE_STL_SHARED_MUTEX 1
3181  #else
3182  #define VMA_USE_STL_SHARED_MUTEX 0
3183  #endif
3184 #endif
3185 
3186 /*
3187 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3188 Library has its own container implementation.
3189 */
3190 #if VMA_USE_STL_VECTOR
3191  #include <vector>
3192 #endif
3193 
3194 #if VMA_USE_STL_UNORDERED_MAP
3195  #include <unordered_map>
3196 #endif
3197 
3198 #if VMA_USE_STL_LIST
3199  #include <list>
3200 #endif
3201 
3202 /*
3203 The following headers are used in this CONFIGURATION section only, so feel free to
3204 remove them if not needed.
3205 */
3206 #include <cassert> // for assert
3207 #include <algorithm> // for min, max
3208 #include <mutex>
3209 #include <atomic> // for std::atomic
3210 
3211 #ifndef VMA_NULL
3212  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3213  #define VMA_NULL nullptr
3214 #endif
3215 
3216 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3217 #include <cstdlib>
3218 void *aligned_alloc(size_t alignment, size_t size)
3219 {
3220  // alignment must be >= sizeof(void*)
3221  if(alignment < sizeof(void*))
3222  {
3223  alignment = sizeof(void*);
3224  }
3225 
3226  return memalign(alignment, size);
3227 }
3228 #elif defined(__APPLE__) || defined(__ANDROID__)
3229 #include <cstdlib>
3230 void *aligned_alloc(size_t alignment, size_t size)
3231 {
3232  // alignment must be >= sizeof(void*)
3233  if(alignment < sizeof(void*))
3234  {
3235  alignment = sizeof(void*);
3236  }
3237 
3238  void *pointer;
3239  if(posix_memalign(&pointer, alignment, size) == 0)
3240  return pointer;
3241  return VMA_NULL;
3242 }
3243 #endif
3244 
3245 // If your compiler is not compatible with C++11 and the definition of the
3246 // aligned_alloc() function is missing, uncommenting the following line may help:
3247 
3248 //#include <malloc.h>
3249 
3250 // Normal assert to check for programmer's errors, especially in Debug configuration.
3251 #ifndef VMA_ASSERT
3252  #ifdef _DEBUG
3253  #define VMA_ASSERT(expr) assert(expr)
3254  #else
3255  #define VMA_ASSERT(expr)
3256  #endif
3257 #endif
3258 
3259 // Assert that will be called very often, like inside data structures e.g. operator[].
3260 // Making it non-empty can make the program slow.
3261 #ifndef VMA_HEAVY_ASSERT
3262  #ifdef _DEBUG
3263  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3264  #else
3265  #define VMA_HEAVY_ASSERT(expr)
3266  #endif
3267 #endif
3268 
3269 #ifndef VMA_ALIGN_OF
3270  #define VMA_ALIGN_OF(type) (__alignof(type))
3271 #endif
3272 
3273 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3274  #if defined(_WIN32)
3275  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3276  #else
3277  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3278  #endif
3279 #endif
3280 
3281 #ifndef VMA_SYSTEM_FREE
3282  #if defined(_WIN32)
3283  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3284  #else
3285  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3286  #endif
3287 #endif
3288 
3289 #ifndef VMA_MIN
3290  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3291 #endif
3292 
3293 #ifndef VMA_MAX
3294  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3295 #endif
3296 
3297 #ifndef VMA_SWAP
3298  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3299 #endif
3300 
3301 #ifndef VMA_SORT
3302  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3303 #endif
3304 
3305 #ifndef VMA_DEBUG_LOG
3306  #define VMA_DEBUG_LOG(format, ...)
3307  /*
3308  #define VMA_DEBUG_LOG(format, ...) do { \
3309  printf(format, __VA_ARGS__); \
3310  printf("\n"); \
3311  } while(false)
3312  */
3313 #endif
3314 
3315 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3316 #if VMA_STATS_STRING_ENABLED
3317  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3318  {
3319  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3320  }
3321  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3322  {
3323  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3324  }
3325  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3326  {
3327  snprintf(outStr, strLen, "%p", ptr);
3328  }
3329 #endif
3330 
3331 #ifndef VMA_MUTEX
3332  class VmaMutex
3333  {
3334  public:
3335  void Lock() { m_Mutex.lock(); }
3336  void Unlock() { m_Mutex.unlock(); }
3337  private:
3338  std::mutex m_Mutex;
3339  };
3340  #define VMA_MUTEX VmaMutex
3341 #endif
3342 
3343 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3344 #ifndef VMA_RW_MUTEX
3345  #if VMA_USE_STL_SHARED_MUTEX
3346  // Use std::shared_mutex from C++17.
3347  #include <shared_mutex>
3348  class VmaRWMutex
3349  {
3350  public:
3351  void LockRead() { m_Mutex.lock_shared(); }
3352  void UnlockRead() { m_Mutex.unlock_shared(); }
3353  void LockWrite() { m_Mutex.lock(); }
3354  void UnlockWrite() { m_Mutex.unlock(); }
3355  private:
3356  std::shared_mutex m_Mutex;
3357  };
3358  #define VMA_RW_MUTEX VmaRWMutex
3359  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3360  // Use SRWLOCK from WinAPI.
3361  // Minimum supported client = Windows Vista, server = Windows Server 2008.
3362  class VmaRWMutex
3363  {
3364  public:
3365  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3366  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3367  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3368  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3369  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3370  private:
3371  SRWLOCK m_Lock;
3372  };
3373  #define VMA_RW_MUTEX VmaRWMutex
3374  #else
3375  // Less efficient fallback: Use normal mutex.
3376  class VmaRWMutex
3377  {
3378  public:
3379  void LockRead() { m_Mutex.Lock(); }
3380  void UnlockRead() { m_Mutex.Unlock(); }
3381  void LockWrite() { m_Mutex.Lock(); }
3382  void UnlockWrite() { m_Mutex.Unlock(); }
3383  private:
3384  VMA_MUTEX m_Mutex;
3385  };
3386  #define VMA_RW_MUTEX VmaRWMutex
3387  #endif // #if VMA_USE_STL_SHARED_MUTEX
3388 #endif // #ifndef VMA_RW_MUTEX
3389 
3390 /*
3391 If providing your own implementation, you need to implement a subset of std::atomic:
3392 
3393 - Constructor(uint32_t desired)
3394 - uint32_t load() const
3395 - void store(uint32_t desired)
3396 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3397 */
3398 #ifndef VMA_ATOMIC_UINT32
3399  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3400 #endif
3401 
3402 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3403 
3407  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3408 #endif
3409 
3410 #ifndef VMA_DEBUG_ALIGNMENT
3411 
3415  #define VMA_DEBUG_ALIGNMENT (1)
3416 #endif
3417 
3418 #ifndef VMA_DEBUG_MARGIN
3419 
3423  #define VMA_DEBUG_MARGIN (0)
3424 #endif
3425 
3426 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3427 
3431  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3432 #endif
3433 
3434 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3435 
3440  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3441 #endif
3442 
3443 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3444 
3448  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3449 #endif
3450 
3451 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3452 
3456  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3457 #endif
3458 
3459 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3460  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3462 #endif
3463 
3464 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3465  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3467 #endif
3468 
3469 #ifndef VMA_CLASS_NO_COPY
3470  #define VMA_CLASS_NO_COPY(className) \
3471  private: \
3472  className(const className&) = delete; \
3473  className& operator=(const className&) = delete;
3474 #endif
3475 
3476 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3477 
3478 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3479 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3480 
3481 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3482 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3483 
3484 /*******************************************************************************
3485 END OF CONFIGURATION
3486 */
3487 
3488 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3489 
3490 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3491  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3492 
3493 // Returns number of bits set to 1 in (v).
3494 static inline uint32_t VmaCountBitsSet(uint32_t v)
3495 {
3496  uint32_t c = v - ((v >> 1) & 0x55555555);
3497  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3498  c = ((c >> 4) + c) & 0x0F0F0F0F;
3499  c = ((c >> 8) + c) & 0x00FF00FF;
3500  c = ((c >> 16) + c) & 0x0000FFFF;
3501  return c;
3502 }
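
// Illustrative sanity checks for the SWAR popcount above (not part of the
// original header):
static inline void VmaExampleCountBitsSet()
{
    VMA_ASSERT(VmaCountBitsSet(0u) == 0);
    VMA_ASSERT(VmaCountBitsSet(0x0000000Bu) == 3); // binary 1011
    VMA_ASSERT(VmaCountBitsSet(0xFFFFFFFFu) == 32);
}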
3503 
3504 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3505 // Use types like uint32_t, uint64_t as T.
3506 template <typename T>
3507 static inline T VmaAlignUp(T val, T align)
3508 {
3509  return (val + align - 1) / align * align;
3510 }
3511 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3512 // Use types like uint32_t, uint64_t as T.
3513 template <typename T>
3514 static inline T VmaAlignDown(T val, T align)
3515 {
3516  return val / align * align;
3517 }
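
// Illustrative checks (not part of the original header). Because these
// helpers use division rather than bit masks, they also work for alignments
// that are not powers of two:
static inline void VmaExampleAlign()
{
    VMA_ASSERT(VmaAlignUp<uint32_t>(11, 8) == 16);
    VMA_ASSERT(VmaAlignDown<uint32_t>(11, 8) == 8);
    VMA_ASSERT(VmaAlignUp<uint32_t>(10, 6) == 12); // non-power-of-2 alignment
}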
3518 
3519 // Division with mathematical rounding to the nearest integer.
3520 template <typename T>
3521 static inline T VmaRoundDiv(T x, T y)
3522 {
3523  return (x + (y / (T)2)) / y;
3524 }
3525 
3526 /*
3527 Returns true if given number is a power of two.
3528 T must be an unsigned integer type, or a signed integer holding a nonnegative value.
3529 For 0 returns true.
3530 */
3531 template <typename T>
3532 inline bool VmaIsPow2(T x)
3533 {
3534  return (x & (x-1)) == 0;
3535 }
3536 
3537 // Returns the smallest power of 2 greater than or equal to v.
3538 static inline uint32_t VmaNextPow2(uint32_t v)
3539 {
3540  v--;
3541  v |= v >> 1;
3542  v |= v >> 2;
3543  v |= v >> 4;
3544  v |= v >> 8;
3545  v |= v >> 16;
3546  v++;
3547  return v;
3548 }
3549 static inline uint64_t VmaNextPow2(uint64_t v)
3550 {
3551  v--;
3552  v |= v >> 1;
3553  v |= v >> 2;
3554  v |= v >> 4;
3555  v |= v >> 8;
3556  v |= v >> 16;
3557  v |= v >> 32;
3558  v++;
3559  return v;
3560 }
3561 
3562 // Returns the largest power of 2 less than or equal to v.
3563 static inline uint32_t VmaPrevPow2(uint32_t v)
3564 {
3565  v |= v >> 1;
3566  v |= v >> 2;
3567  v |= v >> 4;
3568  v |= v >> 8;
3569  v |= v >> 16;
3570  v = v ^ (v >> 1);
3571  return v;
3572 }
3573 static inline uint64_t VmaPrevPow2(uint64_t v)
3574 {
3575  v |= v >> 1;
3576  v |= v >> 2;
3577  v |= v >> 4;
3578  v |= v >> 8;
3579  v |= v >> 16;
3580  v |= v >> 32;
3581  v = v ^ (v >> 1);
3582  return v;
3583 }
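
// Illustrative checks for the power-of-2 helpers above (not part of the
// original header). Values that are already powers of 2 map to themselves:
static inline void VmaExamplePow2()
{
    VMA_ASSERT(VmaIsPow2(64u) && !VmaIsPow2(33u));
    VMA_ASSERT(VmaNextPow2(33u) == 64u);
    VMA_ASSERT(VmaNextPow2(64u) == 64u);
    VMA_ASSERT(VmaPrevPow2(33u) == 32u);
}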
3584 
3585 static inline bool VmaStrIsEmpty(const char* pStr)
3586 {
3587  return pStr == VMA_NULL || *pStr == '\0';
3588 }
3589 
3590 #if VMA_STATS_STRING_ENABLED
3591 
3592 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3593 {
3594  switch(algorithm)
3595  {
3596  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3597  return "Linear";
3598  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3599  return "Buddy";
3600  case 0:
3601  return "Default";
3602  default:
3603  VMA_ASSERT(0);
3604  return "";
3605  }
3606 }
3607 
3608 #endif // #if VMA_STATS_STRING_ENABLED
3609 
3610 #ifndef VMA_SORT
3611 
3612 template<typename Iterator, typename Compare>
3613 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3614 {
3615  Iterator centerValue = end; --centerValue;
3616  Iterator insertIndex = beg;
3617  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3618  {
3619  if(cmp(*memTypeIndex, *centerValue))
3620  {
3621  if(insertIndex != memTypeIndex)
3622  {
3623  VMA_SWAP(*memTypeIndex, *insertIndex);
3624  }
3625  ++insertIndex;
3626  }
3627  }
3628  if(insertIndex != centerValue)
3629  {
3630  VMA_SWAP(*insertIndex, *centerValue);
3631  }
3632  return insertIndex;
3633 }
3634 
3635 template<typename Iterator, typename Compare>
3636 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3637 {
3638  if(beg < end)
3639  {
3640  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3641  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3642  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3643  }
3644 }
3645 
3646 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3647 
3648 #endif // #ifndef VMA_SORT
3649 
3650 /*
3651 Returns true if two memory blocks occupy overlapping pages.
3652 ResourceA must be at a lower memory offset than ResourceB.
3653 
3654 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3655 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3656 */
3657 static inline bool VmaBlocksOnSamePage(
3658  VkDeviceSize resourceAOffset,
3659  VkDeviceSize resourceASize,
3660  VkDeviceSize resourceBOffset,
3661  VkDeviceSize pageSize)
3662 {
3663  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3664  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3665  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3666  VkDeviceSize resourceBStart = resourceBOffset;
3667  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3668  return resourceAEndPage == resourceBStartPage;
3669 }
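
// Worked example (not part of the original header): a resource spanning
// bytes [0, 99] and a resource starting at byte 1000 share the single page
// when pageSize is 1024, but fall on pages 1 and 15 when pageSize is 64:
static inline void VmaExampleBlocksOnSamePage()
{
    VMA_ASSERT(VmaBlocksOnSamePage(0, 100, 1000, 1024));
    VMA_ASSERT(!VmaBlocksOnSamePage(0, 100, 1000, 64));
}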
3670 
3671 enum VmaSuballocationType
3672 {
3673  VMA_SUBALLOCATION_TYPE_FREE = 0,
3674  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3675  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3676  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3677  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3678  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3679  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3680 };
3681 
3682 /*
3683 Returns true if given suballocation types could conflict and must respect
3684 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3685 or linear image and the other one is an optimal image. If a type is unknown, behave
3686 conservatively.
3687 */
3688 static inline bool VmaIsBufferImageGranularityConflict(
3689  VmaSuballocationType suballocType1,
3690  VmaSuballocationType suballocType2)
3691 {
3692  if(suballocType1 > suballocType2)
3693  {
3694  VMA_SWAP(suballocType1, suballocType2);
3695  }
3696 
3697  switch(suballocType1)
3698  {
3699  case VMA_SUBALLOCATION_TYPE_FREE:
3700  return false;
3701  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3702  return true;
3703  case VMA_SUBALLOCATION_TYPE_BUFFER:
3704  return
3705  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3706  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3707  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3708  return
3709  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3710  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3711  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3712  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3713  return
3714  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3715  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3716  return false;
3717  default:
3718  VMA_ASSERT(0);
3719  return true;
3720  }
3721 }
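
// Illustrative checks (not part of the original header): a buffer can share
// a page with a linear image, but not with an optimal one; unknown image
// types are treated conservatively:
static inline void VmaExampleGranularityConflict()
{
    VMA_ASSERT(VmaIsBufferImageGranularityConflict(
        VMA_SUBALLOCATION_TYPE_BUFFER, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL));
    VMA_ASSERT(VmaIsBufferImageGranularityConflict(
        VMA_SUBALLOCATION_TYPE_BUFFER, VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN));
    VMA_ASSERT(!VmaIsBufferImageGranularityConflict(
        VMA_SUBALLOCATION_TYPE_BUFFER, VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR));
}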
3722 
3723 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3724 {
3725  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3726  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3727  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3728  {
3729  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3730  }
3731 }
3732 
3733 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3734 {
3735  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3736  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3737  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3738  {
3739  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3740  {
3741  return false;
3742  }
3743  }
3744  return true;
3745 }
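
// Usage sketch (not part of the original header; only meaningful when
// VMA_DEBUG_MARGIN > 0): the magic values guard the margin that follows an
// allocation, so an out-of-bounds write is detected on validation:
static inline void VmaExampleMagicValue()
{
    char data[256 + VMA_DEBUG_MARGIN];
    VmaWriteMagicValue(data, 256);                // fill the margin behind the region
    VMA_ASSERT(VmaValidateMagicValue(data, 256)); // margin still intact
    // A stray write such as data[256] = 0 would make the validation fail.
}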
3746 
3747 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3748 struct VmaMutexLock
3749 {
3750  VMA_CLASS_NO_COPY(VmaMutexLock)
3751 public:
3752  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
3753  m_pMutex(useMutex ? &mutex : VMA_NULL)
3754  { if(m_pMutex) { m_pMutex->Lock(); } }
3755  ~VmaMutexLock()
3756  { if(m_pMutex) { m_pMutex->Unlock(); } }
3757 private:
3758  VMA_MUTEX* m_pMutex;
3759 };
3760 
3761 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3762 struct VmaMutexLockRead
3763 {
3764  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3765 public:
3766  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3767  m_pMutex(useMutex ? &mutex : VMA_NULL)
3768  { if(m_pMutex) { m_pMutex->LockRead(); } }
3769  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3770 private:
3771  VMA_RW_MUTEX* m_pMutex;
3772 };
3773 
3774 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3775 struct VmaMutexLockWrite
3776 {
3777  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3778 public:
3779  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3780  m_pMutex(useMutex ? &mutex : VMA_NULL)
3781  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3782  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3783 private:
3784  VMA_RW_MUTEX* m_pMutex;
3785 };
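
// Usage sketch (not part of the original header): scoped locking with the
// RAII helpers above. The useMutex flag lets a caller opt out of
// synchronization, e.g. for an externally synchronized allocator:
static inline void VmaExampleScopedLocks(VMA_RW_MUTEX& rwMutex, bool useMutex)
{
    {
        VmaMutexLockRead readLock(rwMutex, useMutex); // shared access
        // ... read shared state ...
    } // rwMutex released here
    {
        VmaMutexLockWrite writeLock(rwMutex, useMutex); // exclusive access
        // ... modify shared state ...
    }
}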
3786 
3787 #if VMA_DEBUG_GLOBAL_MUTEX
3788  static VMA_MUTEX gDebugGlobalMutex;
3789  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3790 #else
3791  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3792 #endif
3793 
3794 // Minimum size of a free suballocation to register it in the free suballocation collection.
3795 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3796 
3797 /*
3798 Performs binary search and returns an iterator to the first element that is greater
3799 than or equal to (key), according to the comparison (cmp).
3800 
3801 Cmp should return true if the first argument is less than the second argument.
3802 
3803 The returned value is the found element, if present in the collection, or the place
3804 where a new element with value (key) should be inserted.
3805 */
3806 template <typename CmpLess, typename IterT, typename KeyT>
3807 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3808 {
3809  size_t down = 0, up = (end - beg);
3810  while(down < up)
3811  {
3812  const size_t mid = (down + up) / 2;
3813  if(cmp(*(beg+mid), key))
3814  {
3815  down = mid + 1;
3816  }
3817  else
3818  {
3819  up = mid;
3820  }
3821  }
3822  return beg + down;
3823 }
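
// Usage sketch (not part of the original header): lower_bound-style lookup
// over a sorted range:
static inline void VmaExampleBinaryFind()
{
    struct CmpIntLess { bool operator()(int a, int b) const { return a < b; } };
    const int sorted[] = { 1, 3, 3, 7 };
    const int* it = VmaBinaryFindFirstNotLess(sorted, sorted + 4, 3, CmpIntLess());
    VMA_ASSERT(it == sorted + 1); // first element not less than 3
    it = VmaBinaryFindFirstNotLess(sorted, sorted + 4, 8, CmpIntLess());
    VMA_ASSERT(it == sorted + 4); // key above all elements: insert at the end
}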
3824 
3825 /*
3826 Returns true if all pointers in the array are non-null and unique.
3827 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3828 T must be pointer type, e.g. VmaAllocation, VmaPool.
3829 */
3830 template<typename T>
3831 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3832 {
3833  for(uint32_t i = 0; i < count; ++i)
3834  {
3835  const T iPtr = arr[i];
3836  if(iPtr == VMA_NULL)
3837  {
3838  return false;
3839  }
3840  for(uint32_t j = i + 1; j < count; ++j)
3841  {
3842  if(iPtr == arr[j])
3843  {
3844  return false;
3845  }
3846  }
3847  }
3848  return true;
3849 }
3850 
3852 // Memory allocation
3853 
3854 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3855 {
3856  if((pAllocationCallbacks != VMA_NULL) &&
3857  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3858  {
3859  return (*pAllocationCallbacks->pfnAllocation)(
3860  pAllocationCallbacks->pUserData,
3861  size,
3862  alignment,
3863  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3864  }
3865  else
3866  {
3867  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3868  }
3869 }
3870 
3871 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3872 {
3873  if((pAllocationCallbacks != VMA_NULL) &&
3874  (pAllocationCallbacks->pfnFree != VMA_NULL))
3875  {
3876  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3877  }
3878  else
3879  {
3880  VMA_SYSTEM_FREE(ptr);
3881  }
3882 }
3883 
3884 template<typename T>
3885 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3886 {
3887  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3888 }
3889 
3890 template<typename T>
3891 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3892 {
3893  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3894 }
3895 
3896 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3897 
3898 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3899 
3900 template<typename T>
3901 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3902 {
3903  ptr->~T();
3904  VmaFree(pAllocationCallbacks, ptr);
3905 }
3906 
3907 template<typename T>
3908 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3909 {
3910  if(ptr != VMA_NULL)
3911  {
3912  for(size_t i = count; i--; )
3913  {
3914  ptr[i].~T();
3915  }
3916  VmaFree(pAllocationCallbacks, ptr);
3917  }
3918 }
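
// Usage sketch (not part of the original header): vma_new/vma_delete route
// construction and destruction through VkAllocationCallbacks; VMA_NULL falls
// back to VMA_SYSTEM_ALIGNED_MALLOC / VMA_SYSTEM_FREE:
static inline void VmaExampleNewDelete()
{
    struct Payload { uint32_t value; };
    const VkAllocationCallbacks* callbacks = VMA_NULL;
    Payload* p = vma_new(callbacks, Payload);
    p->value = 42;
    vma_delete(callbacks, p);
}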
3919 
3920 // STL-compatible allocator.
3921 template<typename T>
3922 class VmaStlAllocator
3923 {
3924 public:
3925  const VkAllocationCallbacks* const m_pCallbacks;
3926  typedef T value_type;
3927 
3928  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3929  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3930 
3931  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3932  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3933 
3934  template<typename U>
3935  bool operator==(const VmaStlAllocator<U>& rhs) const
3936  {
3937  return m_pCallbacks == rhs.m_pCallbacks;
3938  }
3939  template<typename U>
3940  bool operator!=(const VmaStlAllocator<U>& rhs) const
3941  {
3942  return m_pCallbacks != rhs.m_pCallbacks;
3943  }
3944 
3945  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3946 };
3947 
3948 #if VMA_USE_STL_VECTOR
3949 
3950 #define VmaVector std::vector
3951 
3952 template<typename T, typename allocatorT>
3953 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3954 {
3955  vec.insert(vec.begin() + index, item);
3956 }
3957 
3958 template<typename T, typename allocatorT>
3959 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3960 {
3961  vec.erase(vec.begin() + index);
3962 }
3963 
3964 #else // #if VMA_USE_STL_VECTOR
3965 
3966 /* Class with interface compatible with a subset of std::vector.
3967 T must be POD because constructors and destructors are not called and memcpy is
3968 used for these objects. */
3969 template<typename T, typename AllocatorT>
3970 class VmaVector
3971 {
3972 public:
3973  typedef T value_type;
3974 
3975  VmaVector(const AllocatorT& allocator) :
3976  m_Allocator(allocator),
3977  m_pArray(VMA_NULL),
3978  m_Count(0),
3979  m_Capacity(0)
3980  {
3981  }
3982 
3983  VmaVector(size_t count, const AllocatorT& allocator) :
3984  m_Allocator(allocator),
3985  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3986  m_Count(count),
3987  m_Capacity(count)
3988  {
3989  }
3990 
3991  VmaVector(const VmaVector<T, AllocatorT>& src) :
3992  m_Allocator(src.m_Allocator),
3993  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3994  m_Count(src.m_Count),
3995  m_Capacity(src.m_Count)
3996  {
3997  if(m_Count != 0)
3998  {
3999  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4000  }
4001  }
4002 
4003  ~VmaVector()
4004  {
4005  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4006  }
4007 
4008  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4009  {
4010  if(&rhs != this)
4011  {
4012  resize(rhs.m_Count);
4013  if(m_Count != 0)
4014  {
4015  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4016  }
4017  }
4018  return *this;
4019  }
4020 
4021  bool empty() const { return m_Count == 0; }
4022  size_t size() const { return m_Count; }
4023  T* data() { return m_pArray; }
4024  const T* data() const { return m_pArray; }
4025 
4026  T& operator[](size_t index)
4027  {
4028  VMA_HEAVY_ASSERT(index < m_Count);
4029  return m_pArray[index];
4030  }
4031  const T& operator[](size_t index) const
4032  {
4033  VMA_HEAVY_ASSERT(index < m_Count);
4034  return m_pArray[index];
4035  }
4036 
4037  T& front()
4038  {
4039  VMA_HEAVY_ASSERT(m_Count > 0);
4040  return m_pArray[0];
4041  }
4042  const T& front() const
4043  {
4044  VMA_HEAVY_ASSERT(m_Count > 0);
4045  return m_pArray[0];
4046  }
4047  T& back()
4048  {
4049  VMA_HEAVY_ASSERT(m_Count > 0);
4050  return m_pArray[m_Count - 1];
4051  }
4052  const T& back() const
4053  {
4054  VMA_HEAVY_ASSERT(m_Count > 0);
4055  return m_pArray[m_Count - 1];
4056  }
4057 
4058  void reserve(size_t newCapacity, bool freeMemory = false)
4059  {
4060  newCapacity = VMA_MAX(newCapacity, m_Count);
4061 
4062  if((newCapacity < m_Capacity) && !freeMemory)
4063  {
4064  newCapacity = m_Capacity;
4065  }
4066 
4067  if(newCapacity != m_Capacity)
4068  {
4069  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4070  if(m_Count != 0)
4071  {
4072  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4073  }
4074  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4075  m_Capacity = newCapacity;
4076  m_pArray = newArray;
4077  }
4078  }
4079 
4080  void resize(size_t newCount, bool freeMemory = false)
4081  {
4082  size_t newCapacity = m_Capacity;
4083  if(newCount > m_Capacity)
4084  {
4085  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4086  }
4087  else if(freeMemory)
4088  {
4089  newCapacity = newCount;
4090  }
4091 
4092  if(newCapacity != m_Capacity)
4093  {
4094  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4095  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4096  if(elementsToCopy != 0)
4097  {
4098  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4099  }
4100  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4101  m_Capacity = newCapacity;
4102  m_pArray = newArray;
4103  }
4104 
4105  m_Count = newCount;
4106  }
4107 
4108  void clear(bool freeMemory = false)
4109  {
4110  resize(0, freeMemory);
4111  }
4112 
4113  void insert(size_t index, const T& src)
4114  {
4115  VMA_HEAVY_ASSERT(index <= m_Count);
4116  const size_t oldCount = size();
4117  resize(oldCount + 1);
4118  if(index < oldCount)
4119  {
4120  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4121  }
4122  m_pArray[index] = src;
4123  }
4124 
4125  void remove(size_t index)
4126  {
4127  VMA_HEAVY_ASSERT(index < m_Count);
4128  const size_t oldCount = size();
4129  if(index < oldCount - 1)
4130  {
4131  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4132  }
4133  resize(oldCount - 1);
4134  }
4135 
4136  void push_back(const T& src)
4137  {
4138  const size_t newIndex = size();
4139  resize(newIndex + 1);
4140  m_pArray[newIndex] = src;
4141  }
4142 
4143  void pop_back()
4144  {
4145  VMA_HEAVY_ASSERT(m_Count > 0);
4146  resize(size() - 1);
4147  }
4148 
4149  void push_front(const T& src)
4150  {
4151  insert(0, src);
4152  }
4153 
4154  void pop_front()
4155  {
4156  VMA_HEAVY_ASSERT(m_Count > 0);
4157  remove(0);
4158  }
4159 
4160  typedef T* iterator;
4161 
4162  iterator begin() { return m_pArray; }
4163  iterator end() { return m_pArray + m_Count; }
4164 
4165 private:
4166  AllocatorT m_Allocator;
4167  T* m_pArray;
4168  size_t m_Count;
4169  size_t m_Capacity;
4170 };
4171 
4172 template<typename T, typename allocatorT>
4173 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4174 {
4175  vec.insert(index, item);
4176 }
4177 
4178 template<typename T, typename allocatorT>
4179 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4180 {
4181  vec.remove(index);
4182 }
4183 
4184 #endif // #if VMA_USE_STL_VECTOR
4185 
4186 template<typename CmpLess, typename VectorT>
4187 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4188 {
4189  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4190  vector.data(),
4191  vector.data() + vector.size(),
4192  value,
4193  CmpLess()) - vector.data();
4194  VmaVectorInsert(vector, indexToInsert, value);
4195  return indexToInsert;
4196 }
4197 
4198 template<typename CmpLess, typename VectorT>
4199 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4200 {
4201  CmpLess comparator;
4202  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4203  vector.begin(),
4204  vector.end(),
4205  value,
4206  comparator);
4207  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4208  {
4209  size_t indexToRemove = it - vector.begin();
4210  VmaVectorRemove(vector, indexToRemove);
4211  return true;
4212  }
4213  return false;
4214 }
4215 
4216 template<typename CmpLess, typename IterT, typename KeyT>
4217 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4218 {
4219  CmpLess comparator;
4220  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4221  beg, end, value, comparator);
4222  if(it == end ||
4223  (!comparator(*it, value) && !comparator(value, *it)))
4224  {
4225  return it;
4226  }
4227  return end;
4228 }
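
// Usage sketch (not part of the original header): keeping a VmaVector sorted
// with the helpers above:
static inline void VmaExampleSortedVector()
{
    struct CmpIntLess { bool operator()(int a, int b) const { return a < b; } };
    VmaVector< int, VmaStlAllocator<int> > vec(VmaStlAllocator<int>(VMA_NULL));
    VmaVectorInsertSorted<CmpIntLess>(vec, 7);
    VmaVectorInsertSorted<CmpIntLess>(vec, 3); // stays sorted ascending: 3, 7
    VMA_ASSERT(vec[0] == 3 && vec[1] == 7);
    VMA_ASSERT(VmaVectorRemoveSorted<CmpIntLess>(vec, 7)); // found and removed
}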
4229 
4231 // class VmaPoolAllocator
4232 
4233 /*
4234 Allocator for objects of type T using a list of arrays (pools) to speed up
4235 allocation. The number of elements that can be allocated is not bounded, because
4236 the allocator can create multiple blocks.
4237 */
4238 template<typename T>
4239 class VmaPoolAllocator
4240 {
4241  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4242 public:
4243  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
4244  ~VmaPoolAllocator();
4245  void Clear();
4246  T* Alloc();
4247  void Free(T* ptr);
4248 
4249 private:
4250  union Item
4251  {
4252  uint32_t NextFreeIndex;
4253  T Value;
4254  };
4255 
4256  struct ItemBlock
4257  {
4258  Item* pItems;
4259  uint32_t Capacity;
4260  uint32_t FirstFreeIndex;
4261  };
4262 
4263  const VkAllocationCallbacks* m_pAllocationCallbacks;
4264  const uint32_t m_FirstBlockCapacity;
4265  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4266 
4267  ItemBlock& CreateNewBlock();
4268 };
4269 
4270 template<typename T>
4271 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
4272  m_pAllocationCallbacks(pAllocationCallbacks),
4273  m_FirstBlockCapacity(firstBlockCapacity),
4274  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4275 {
4276  VMA_ASSERT(m_FirstBlockCapacity > 1);
4277 }
4278 
4279 template<typename T>
4280 VmaPoolAllocator<T>::~VmaPoolAllocator()
4281 {
4282  Clear();
4283 }
4284 
4285 template<typename T>
4286 void VmaPoolAllocator<T>::Clear()
4287 {
4288  for(size_t i = m_ItemBlocks.size(); i--; )
4289  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4290  m_ItemBlocks.clear();
4291 }
4292 
4293 template<typename T>
4294 T* VmaPoolAllocator<T>::Alloc()
4295 {
4296  for(size_t i = m_ItemBlocks.size(); i--; )
4297  {
4298  ItemBlock& block = m_ItemBlocks[i];
4299  // This block has some free items: Use first one.
4300  if(block.FirstFreeIndex != UINT32_MAX)
4301  {
4302  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4303  block.FirstFreeIndex = pItem->NextFreeIndex;
4304  return &pItem->Value;
4305  }
4306  }
4307 
4308  // No block has free item: Create new one and use it.
4309  ItemBlock& newBlock = CreateNewBlock();
4310  Item* const pItem = &newBlock.pItems[0];
4311  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4312  return &pItem->Value;
4313 }
4314 
4315 template<typename T>
4316 void VmaPoolAllocator<T>::Free(T* ptr)
4317 {
4318  // Search all memory blocks to find ptr.
4319  for(size_t i = m_ItemBlocks.size(); i--; )
4320  {
4321  ItemBlock& block = m_ItemBlocks[i];
4322 
4323  // Casting to union.
4324  Item* pItemPtr;
4325  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4326 
4327  // Check if pItemPtr is in address range of this block.
4328  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4329  {
4330  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4331  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4332  block.FirstFreeIndex = index;
4333  return;
4334  }
4335  }
4336  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4337 }
4338 
4339 template<typename T>
4340 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4341 {
4342  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
4343  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
4344 
4345  const ItemBlock newBlock = {
4346  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
4347  newBlockCapacity,
4348  0 };
4349 
4350  m_ItemBlocks.push_back(newBlock);
4351 
4352  // Set up the singly-linked list of all free items in this block.
4353  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
4354  newBlock.pItems[i].NextFreeIndex = i + 1;
4355  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
4356  return m_ItemBlocks.back();
4357 }
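
// Usage sketch (not part of the original header). Alloc() hands out raw,
// unconstructed storage from the newest block with a free slot; Free() pushes
// the slot back onto that block's LIFO free list:
static inline void VmaExamplePoolAllocator()
{
    VmaPoolAllocator<uint32_t> pool(VMA_NULL, 32); // first block holds 32 items
    uint32_t* a = pool.Alloc();
    uint32_t* b = pool.Alloc();
    *a = 1; *b = 2;
    pool.Free(a);
    uint32_t* c = pool.Alloc(); // reuses a's slot (head of the free list)
    pool.Free(c);
    pool.Free(b);
}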
4358 
4360 // class VmaRawList, VmaList
4361 
4362 #if VMA_USE_STL_LIST
4363 
4364 #define VmaList std::list
4365 
4366 #else // #if VMA_USE_STL_LIST
4367 
4368 template<typename T>
4369 struct VmaListItem
4370 {
4371  VmaListItem* pPrev;
4372  VmaListItem* pNext;
4373  T Value;
4374 };
4375 
4376 // Doubly linked list.
4377 template<typename T>
4378 class VmaRawList
4379 {
4380  VMA_CLASS_NO_COPY(VmaRawList)
4381 public:
4382  typedef VmaListItem<T> ItemType;
4383 
4384  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4385  ~VmaRawList();
4386  void Clear();
4387 
4388  size_t GetCount() const { return m_Count; }
4389  bool IsEmpty() const { return m_Count == 0; }
4390 
4391  ItemType* Front() { return m_pFront; }
4392  const ItemType* Front() const { return m_pFront; }
4393  ItemType* Back() { return m_pBack; }
4394  const ItemType* Back() const { return m_pBack; }
4395 
4396  ItemType* PushBack();
4397  ItemType* PushFront();
4398  ItemType* PushBack(const T& value);
4399  ItemType* PushFront(const T& value);
4400  void PopBack();
4401  void PopFront();
4402 
4403  // Item can be null - it means PushBack.
4404  ItemType* InsertBefore(ItemType* pItem);
4405  // Item can be null - it means PushFront.
4406  ItemType* InsertAfter(ItemType* pItem);
4407 
4408  ItemType* InsertBefore(ItemType* pItem, const T& value);
4409  ItemType* InsertAfter(ItemType* pItem, const T& value);
4410 
4411  void Remove(ItemType* pItem);
4412 
4413 private:
4414  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4415  VmaPoolAllocator<ItemType> m_ItemAllocator;
4416  ItemType* m_pFront;
4417  ItemType* m_pBack;
4418  size_t m_Count;
4419 };
4420 
4421 template<typename T>
4422 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4423  m_pAllocationCallbacks(pAllocationCallbacks),
4424  m_ItemAllocator(pAllocationCallbacks, 128),
4425  m_pFront(VMA_NULL),
4426  m_pBack(VMA_NULL),
4427  m_Count(0)
4428 {
4429 }
4430 
4431 template<typename T>
4432 VmaRawList<T>::~VmaRawList()
4433 {
4434  // Intentionally not calling Clear, because that would waste computations
4435  // returning all items to m_ItemAllocator as free.
4436 }
4437 
4438 template<typename T>
4439 void VmaRawList<T>::Clear()
4440 {
4441  if(IsEmpty() == false)
4442  {
4443  ItemType* pItem = m_pBack;
4444  while(pItem != VMA_NULL)
4445  {
4446  ItemType* const pPrevItem = pItem->pPrev;
4447  m_ItemAllocator.Free(pItem);
4448  pItem = pPrevItem;
4449  }
4450  m_pFront = VMA_NULL;
4451  m_pBack = VMA_NULL;
4452  m_Count = 0;
4453  }
4454 }
4455 
4456 template<typename T>
4457 VmaListItem<T>* VmaRawList<T>::PushBack()
4458 {
4459  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4460  pNewItem->pNext = VMA_NULL;
4461  if(IsEmpty())
4462  {
4463  pNewItem->pPrev = VMA_NULL;
4464  m_pFront = pNewItem;
4465  m_pBack = pNewItem;
4466  m_Count = 1;
4467  }
4468  else
4469  {
4470  pNewItem->pPrev = m_pBack;
4471  m_pBack->pNext = pNewItem;
4472  m_pBack = pNewItem;
4473  ++m_Count;
4474  }
4475  return pNewItem;
4476 }
4477 
4478 template<typename T>
4479 VmaListItem<T>* VmaRawList<T>::PushFront()
4480 {
4481  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4482  pNewItem->pPrev = VMA_NULL;
4483  if(IsEmpty())
4484  {
4485  pNewItem->pNext = VMA_NULL;
4486  m_pFront = pNewItem;
4487  m_pBack = pNewItem;
4488  m_Count = 1;
4489  }
4490  else
4491  {
4492  pNewItem->pNext = m_pFront;
4493  m_pFront->pPrev = pNewItem;
4494  m_pFront = pNewItem;
4495  ++m_Count;
4496  }
4497  return pNewItem;
4498 }
4499 
4500 template<typename T>
4501 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4502 {
4503  ItemType* const pNewItem = PushBack();
4504  pNewItem->Value = value;
4505  return pNewItem;
4506 }
4507 
4508 template<typename T>
4509 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4510 {
4511  ItemType* const pNewItem = PushFront();
4512  pNewItem->Value = value;
4513  return pNewItem;
4514 }
4515 
4516 template<typename T>
4517 void VmaRawList<T>::PopBack()
4518 {
4519  VMA_HEAVY_ASSERT(m_Count > 0);
4520  ItemType* const pBackItem = m_pBack;
4521  ItemType* const pPrevItem = pBackItem->pPrev;
4522  if(pPrevItem != VMA_NULL)
4523  {
4524  pPrevItem->pNext = VMA_NULL;
4525  }
4526  m_pBack = pPrevItem;
4527  m_ItemAllocator.Free(pBackItem);
4528  --m_Count;
4529 }
4530 
4531 template<typename T>
4532 void VmaRawList<T>::PopFront()
4533 {
4534  VMA_HEAVY_ASSERT(m_Count > 0);
4535  ItemType* const pFrontItem = m_pFront;
4536  ItemType* const pNextItem = pFrontItem->pNext;
4537  if(pNextItem != VMA_NULL)
4538  {
4539  pNextItem->pPrev = VMA_NULL;
4540  }
4541  m_pFront = pNextItem;
4542  m_ItemAllocator.Free(pFrontItem);
4543  --m_Count;
4544 }
4545 
4546 template<typename T>
4547 void VmaRawList<T>::Remove(ItemType* pItem)
4548 {
4549  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4550  VMA_HEAVY_ASSERT(m_Count > 0);
4551 
4552  if(pItem->pPrev != VMA_NULL)
4553  {
4554  pItem->pPrev->pNext = pItem->pNext;
4555  }
4556  else
4557  {
4558  VMA_HEAVY_ASSERT(m_pFront == pItem);
4559  m_pFront = pItem->pNext;
4560  }
4561 
4562  if(pItem->pNext != VMA_NULL)
4563  {
4564  pItem->pNext->pPrev = pItem->pPrev;
4565  }
4566  else
4567  {
4568  VMA_HEAVY_ASSERT(m_pBack == pItem);
4569  m_pBack = pItem->pPrev;
4570  }
4571 
4572  m_ItemAllocator.Free(pItem);
4573  --m_Count;
4574 }
4575 
4576 template<typename T>
4577 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4578 {
4579  if(pItem != VMA_NULL)
4580  {
4581  ItemType* const prevItem = pItem->pPrev;
4582  ItemType* const newItem = m_ItemAllocator.Alloc();
4583  newItem->pPrev = prevItem;
4584  newItem->pNext = pItem;
4585  pItem->pPrev = newItem;
4586  if(prevItem != VMA_NULL)
4587  {
4588  prevItem->pNext = newItem;
4589  }
4590  else
4591  {
4592  VMA_HEAVY_ASSERT(m_pFront == pItem);
4593  m_pFront = newItem;
4594  }
4595  ++m_Count;
4596  return newItem;
4597  }
4598  else
4599  return PushBack();
4600 }
4601 
4602 template<typename T>
4603 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4604 {
4605  if(pItem != VMA_NULL)
4606  {
4607  ItemType* const nextItem = pItem->pNext;
4608  ItemType* const newItem = m_ItemAllocator.Alloc();
4609  newItem->pNext = nextItem;
4610  newItem->pPrev = pItem;
4611  pItem->pNext = newItem;
4612  if(nextItem != VMA_NULL)
4613  {
4614  nextItem->pPrev = newItem;
4615  }
4616  else
4617  {
4618  VMA_HEAVY_ASSERT(m_pBack == pItem);
4619  m_pBack = newItem;
4620  }
4621  ++m_Count;
4622  return newItem;
4623  }
4624  else
4625  return PushFront();
4626 }
4627 
4628 template<typename T>
4629 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4630 {
4631  ItemType* const newItem = InsertBefore(pItem);
4632  newItem->Value = value;
4633  return newItem;
4634 }
4635 
4636 template<typename T>
4637 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4638 {
4639  ItemType* const newItem = InsertAfter(pItem);
4640  newItem->Value = value;
4641  return newItem;
4642 }
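// Illustrative sketch only (not part of the library): typical use of the
// intrusive VmaRawList defined above. The function name is hypothetical.
#if 0
void VmaRawListExample(const VkAllocationCallbacks* pAllocationCallbacks)
{
    VmaRawList<int> list(pAllocationCallbacks);
    VmaListItem<int>* const pFirst = list.PushBack(1);   // list: 1
    list.PushBack(2);                                    // list: 1, 2
    list.PushFront(0);                                   // list: 0, 1, 2
    list.InsertAfter(pFirst, 10);                        // list: 0, 1, 10, 2
    list.Remove(pFirst);                                 // list: 0, 10, 2
    list.PopBack();                                      // list: 0, 10
}
#endif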
4643 
4644 template<typename T, typename AllocatorT>
4645 class VmaList
4646 {
4647  VMA_CLASS_NO_COPY(VmaList)
4648 public:
4649  class iterator
4650  {
4651  public:
4652  iterator() :
4653  m_pList(VMA_NULL),
4654  m_pItem(VMA_NULL)
4655  {
4656  }
4657 
4658  T& operator*() const
4659  {
4660  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4661  return m_pItem->Value;
4662  }
4663  T* operator->() const
4664  {
4665  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4666  return &m_pItem->Value;
4667  }
4668 
4669  iterator& operator++()
4670  {
4671  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4672  m_pItem = m_pItem->pNext;
4673  return *this;
4674  }
4675  iterator& operator--()
4676  {
4677  if(m_pItem != VMA_NULL)
4678  {
4679  m_pItem = m_pItem->pPrev;
4680  }
4681  else
4682  {
4683  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4684  m_pItem = m_pList->Back();
4685  }
4686  return *this;
4687  }
4688 
4689  iterator operator++(int)
4690  {
4691  iterator result = *this;
4692  ++*this;
4693  return result;
4694  }
4695  iterator operator--(int)
4696  {
4697  iterator result = *this;
4698  --*this;
4699  return result;
4700  }
4701 
4702  bool operator==(const iterator& rhs) const
4703  {
4704  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4705  return m_pItem == rhs.m_pItem;
4706  }
4707  bool operator!=(const iterator& rhs) const
4708  {
4709  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4710  return m_pItem != rhs.m_pItem;
4711  }
4712 
4713  private:
4714  VmaRawList<T>* m_pList;
4715  VmaListItem<T>* m_pItem;
4716 
4717  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4718  m_pList(pList),
4719  m_pItem(pItem)
4720  {
4721  }
4722 
4723  friend class VmaList<T, AllocatorT>;
4724  };
4725 
4726  class const_iterator
4727  {
4728  public:
4729  const_iterator() :
4730  m_pList(VMA_NULL),
4731  m_pItem(VMA_NULL)
4732  {
4733  }
4734 
4735  const_iterator(const iterator& src) :
4736  m_pList(src.m_pList),
4737  m_pItem(src.m_pItem)
4738  {
4739  }
4740 
4741  const T& operator*() const
4742  {
4743  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4744  return m_pItem->Value;
4745  }
4746  const T* operator->() const
4747  {
4748  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4749  return &m_pItem->Value;
4750  }
4751 
4752  const_iterator& operator++()
4753  {
4754  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4755  m_pItem = m_pItem->pNext;
4756  return *this;
4757  }
4758  const_iterator& operator--()
4759  {
4760  if(m_pItem != VMA_NULL)
4761  {
4762  m_pItem = m_pItem->pPrev;
4763  }
4764  else
4765  {
4766  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4767  m_pItem = m_pList->Back();
4768  }
4769  return *this;
4770  }
4771 
4772  const_iterator operator++(int)
4773  {
4774  const_iterator result = *this;
4775  ++*this;
4776  return result;
4777  }
4778  const_iterator operator--(int)
4779  {
4780  const_iterator result = *this;
4781  --*this;
4782  return result;
4783  }
4784 
4785  bool operator==(const const_iterator& rhs) const
4786  {
4787  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4788  return m_pItem == rhs.m_pItem;
4789  }
4790  bool operator!=(const const_iterator& rhs) const
4791  {
4792  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4793  return m_pItem != rhs.m_pItem;
4794  }
4795 
4796  private:
4797  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4798  m_pList(pList),
4799  m_pItem(pItem)
4800  {
4801  }
4802 
4803  const VmaRawList<T>* m_pList;
4804  const VmaListItem<T>* m_pItem;
4805 
4806  friend class VmaList<T, AllocatorT>;
4807  };
4808 
4809  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4810 
4811  bool empty() const { return m_RawList.IsEmpty(); }
4812  size_t size() const { return m_RawList.GetCount(); }
4813 
4814  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4815  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4816 
4817  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4818  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4819 
4820  void clear() { m_RawList.Clear(); }
4821  void push_back(const T& value) { m_RawList.PushBack(value); }
4822  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4823  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4824 
4825 private:
4826  VmaRawList<T> m_RawList;
4827 };
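// Illustrative sketch only: VmaList mirrors a subset of std::list, so it can be
// traversed with the iterator defined above. The function name is hypothetical.
#if 0
void VmaListExample(const VmaStlAllocator<int>& allocator)
{
    VmaList<int, VmaStlAllocator<int> > list(allocator);
    list.push_back(1);
    list.push_back(2);
    list.push_back(3);
    for(VmaList<int, VmaStlAllocator<int> >::iterator it = list.begin(); it != list.end(); ++it)
    {
        *it += 10; // Elements become 11, 12, 13.
    }
    list.erase(list.begin()); // Removes 11.
}
#endif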
4828 
4829 #endif // #if VMA_USE_STL_LIST
4830 
4832 // class VmaMap
4833 
4834 // Unused in this version.
4835 #if 0
4836 
4837 #if VMA_USE_STL_UNORDERED_MAP
4838 
4839 #define VmaPair std::pair
4840 
4841 #define VMA_MAP_TYPE(KeyT, ValueT) \
4842  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4843 
4844 #else // #if VMA_USE_STL_UNORDERED_MAP
4845 
4846 template<typename T1, typename T2>
4847 struct VmaPair
4848 {
4849  T1 first;
4850  T2 second;
4851 
4852  VmaPair() : first(), second() { }
4853  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4854 };
4855 
4856 /* Class compatible with a subset of the interface of std::unordered_map.

4857 KeyT, ValueT must be POD because they will be stored in VmaVector.
4858 */
4859 template<typename KeyT, typename ValueT>
4860 class VmaMap
4861 {
4862 public:
4863  typedef VmaPair<KeyT, ValueT> PairType;
4864  typedef PairType* iterator;
4865 
4866  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4867 
4868  iterator begin() { return m_Vector.begin(); }
4869  iterator end() { return m_Vector.end(); }
4870 
4871  void insert(const PairType& pair);
4872  iterator find(const KeyT& key);
4873  void erase(iterator it);
4874 
4875 private:
4876  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4877 };
4878 
4879 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4880 
4881 template<typename FirstT, typename SecondT>
4882 struct VmaPairFirstLess
4883 {
4884  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4885  {
4886  return lhs.first < rhs.first;
4887  }
4888  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4889  {
4890  return lhs.first < rhsFirst;
4891  }
4892 };
4893 
4894 template<typename KeyT, typename ValueT>
4895 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4896 {
4897  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4898  m_Vector.data(),
4899  m_Vector.data() + m_Vector.size(),
4900  pair,
4901  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4902  VmaVectorInsert(m_Vector, indexToInsert, pair);
4903 }
4904 
4905 template<typename KeyT, typename ValueT>
4906 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4907 {
4908  PairType* it = VmaBinaryFindFirstNotLess(
4909  m_Vector.data(),
4910  m_Vector.data() + m_Vector.size(),
4911  key,
4912  VmaPairFirstLess<KeyT, ValueT>());
4913  if((it != m_Vector.end()) && (it->first == key))
4914  {
4915  return it;
4916  }
4917  else
4918  {
4919  return m_Vector.end();
4920  }
4921 }
4922 
4923 template<typename KeyT, typename ValueT>
4924 void VmaMap<KeyT, ValueT>::erase(iterator it)
4925 {
4926  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4927 }
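// Illustrative sketch only: because VmaMap keeps its pairs sorted by key inside
// a VmaVector, insert() and find() above rely on binary search. Hypothetical usage:
#if 0
void VmaMapExample(const VmaStlAllocator< VmaPair<uint32_t, float> >& allocator)
{
    VmaMap<uint32_t, float> map(allocator);
    map.insert(VmaPair<uint32_t, float>(2, 0.5f));
    map.insert(VmaPair<uint32_t, float>(1, 0.25f)); // Placed before key 2 to keep order.
    VmaMap<uint32_t, float>::iterator it = map.find(2);
    if(it != map.end())
    {
        map.erase(it);
    }
}
#endif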
4928 
4929 #endif // #if VMA_USE_STL_UNORDERED_MAP
4930 
4931 #endif // #if 0
4932 
4934 
4935 class VmaDeviceMemoryBlock;
4936 
4937 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4938 
4939 struct VmaAllocation_T
4940 {
4941 private:
4942  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4943 
4944  enum FLAGS
4945  {
4946  FLAG_USER_DATA_STRING = 0x01,
4947  };
4948 
4949 public:
4950  enum ALLOCATION_TYPE
4951  {
4952  ALLOCATION_TYPE_NONE,
4953  ALLOCATION_TYPE_BLOCK,
4954  ALLOCATION_TYPE_DEDICATED,
4955  };
4956 
4957  /*
4958  This struct cannot have a constructor or destructor. It must be POD because it is
4959  allocated using VmaPoolAllocator.
4960  */
4961 
4962  void Ctor(uint32_t currentFrameIndex, bool userDataString)
4963  {
4964  m_Alignment = 1;
4965  m_Size = 0;
4966  m_pUserData = VMA_NULL;
4967  m_LastUseFrameIndex = currentFrameIndex;
4968  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
4969  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
4970  m_MapCount = 0;
4971  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
4972 
4973 #if VMA_STATS_STRING_ENABLED
4974  m_CreationFrameIndex = currentFrameIndex;
4975  m_BufferImageUsage = 0;
4976 #endif
4977  }
4978 
4979  void Dtor()
4980  {
4981  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4982 
4983  // Check if owned string was freed.
4984  VMA_ASSERT(m_pUserData == VMA_NULL);
4985  }
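    // Illustrative sketch only: because this struct is POD, its lifetime is managed
    // manually - raw memory comes from a pool allocator, then Ctor()/Dtor() are
    // called explicitly. The helper below is hypothetical.
#if 0
    VmaAllocation_T* CreateAllocationExample(
        VmaPoolAllocator<VmaAllocation_T>& poolAllocator,
        uint32_t currentFrameIndex)
    {
        VmaAllocation_T* const pAlloc = poolAllocator.Alloc(); // No constructor runs here.
        pAlloc->Ctor(currentFrameIndex, /*userDataString=*/false);
        return pAlloc; // Later: pAlloc->Dtor(); poolAllocator.Free(pAlloc);
    }
#endif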
4986 
4987  void InitBlockAllocation(
4988  VmaDeviceMemoryBlock* block,
4989  VkDeviceSize offset,
4990  VkDeviceSize alignment,
4991  VkDeviceSize size,
4992  VmaSuballocationType suballocationType,
4993  bool mapped,
4994  bool canBecomeLost)
4995  {
4996  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4997  VMA_ASSERT(block != VMA_NULL);
4998  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4999  m_Alignment = alignment;
5000  m_Size = size;
5001  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5002  m_SuballocationType = (uint8_t)suballocationType;
5003  m_BlockAllocation.m_Block = block;
5004  m_BlockAllocation.m_Offset = offset;
5005  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5006  }
5007 
5008  void InitLost()
5009  {
5010  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5011  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5012  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5013  m_BlockAllocation.m_Block = VMA_NULL;
5014  m_BlockAllocation.m_Offset = 0;
5015  m_BlockAllocation.m_CanBecomeLost = true;
5016  }
5017 
5018  void ChangeBlockAllocation(
5019  VmaAllocator hAllocator,
5020  VmaDeviceMemoryBlock* block,
5021  VkDeviceSize offset);
5022 
5023  void ChangeSize(VkDeviceSize newSize);
5024  void ChangeOffset(VkDeviceSize newOffset);
5025 
5026  // A non-null pMappedData means the allocation was created with the MAPPED flag.
5027  void InitDedicatedAllocation(
5028  uint32_t memoryTypeIndex,
5029  VkDeviceMemory hMemory,
5030  VmaSuballocationType suballocationType,
5031  void* pMappedData,
5032  VkDeviceSize size)
5033  {
5034  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5035  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5036  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5037  m_Alignment = 0;
5038  m_Size = size;
5039  m_SuballocationType = (uint8_t)suballocationType;
5040  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5041  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
5042  m_DedicatedAllocation.m_hMemory = hMemory;
5043  m_DedicatedAllocation.m_pMappedData = pMappedData;
5044  }
5045 
5046  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5047  VkDeviceSize GetAlignment() const { return m_Alignment; }
5048  VkDeviceSize GetSize() const { return m_Size; }
5049  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5050  void* GetUserData() const { return m_pUserData; }
5051  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5052  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5053 
5054  VmaDeviceMemoryBlock* GetBlock() const
5055  {
5056  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5057  return m_BlockAllocation.m_Block;
5058  }
5059  VkDeviceSize GetOffset() const;
5060  VkDeviceMemory GetMemory() const;
5061  uint32_t GetMemoryTypeIndex() const;
5062  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5063  void* GetMappedData() const;
5064  bool CanBecomeLost() const;
5065 
5066  uint32_t GetLastUseFrameIndex() const
5067  {
5068  return m_LastUseFrameIndex.load();
5069  }
5070  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5071  {
5072  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5073  }
5074  /*
5075  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5076  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5077  - Else, returns false.
5078 
5079  If hAllocation is already lost, asserts - this function should not be called then.
5080  If hAllocation was not created with CAN_BECOME_LOST_BIT, asserts.
5081  */
5082  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
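    // Illustrative sketch only of the contract described above, written as the
    // compare-exchange loop that the atomic m_LastUseFrameIndex suggests; the
    // actual definition lives elsewhere in this file.
#if 0
    bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    {
        VMA_ASSERT(CanBecomeLost());
        uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                VMA_ASSERT(0); // Already lost - calling MakeLost() now is an error.
                return false;
            }
            if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
            {
                return false; // Still potentially in use by a recent frame.
            }
            // Try to atomically mark as lost; on failure another thread changed
            // the index, so re-evaluate with the updated value.
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                return true;
            }
        }
    }
#endif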
5083 
5084  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5085  {
5086  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5087  outInfo.blockCount = 1;
5088  outInfo.allocationCount = 1;
5089  outInfo.unusedRangeCount = 0;
5090  outInfo.usedBytes = m_Size;
5091  outInfo.unusedBytes = 0;
5092  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5093  outInfo.unusedRangeSizeMin = UINT64_MAX;
5094  outInfo.unusedRangeSizeMax = 0;
5095  }
5096 
5097  void BlockAllocMap();
5098  void BlockAllocUnmap();
5099  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5100  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5101 
5102 #if VMA_STATS_STRING_ENABLED
5103  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5104  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5105 
5106  void InitBufferImageUsage(uint32_t bufferImageUsage)
5107  {
5108  VMA_ASSERT(m_BufferImageUsage == 0);
5109  m_BufferImageUsage = bufferImageUsage;
5110  }
5111 
5112  void PrintParameters(class VmaJsonWriter& json) const;
5113 #endif
5114 
5115 private:
5116  VkDeviceSize m_Alignment;
5117  VkDeviceSize m_Size;
5118  void* m_pUserData;
5119  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5120  uint8_t m_Type; // ALLOCATION_TYPE
5121  uint8_t m_SuballocationType; // VmaSuballocationType
5122  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5123  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5124  uint8_t m_MapCount;
5125  uint8_t m_Flags; // enum FLAGS
5126 
5127  // Allocation out of VmaDeviceMemoryBlock.
5128  struct BlockAllocation
5129  {
5130  VmaDeviceMemoryBlock* m_Block;
5131  VkDeviceSize m_Offset;
5132  bool m_CanBecomeLost;
5133  };
5134 
5135  // Allocation for an object that has its own private VkDeviceMemory.
5136  struct DedicatedAllocation
5137  {
5138  uint32_t m_MemoryTypeIndex;
5139  VkDeviceMemory m_hMemory;
5140  void* m_pMappedData; // Not null means memory is mapped.
5141  };
5142 
5143  union
5144  {
5145  // Allocation out of VmaDeviceMemoryBlock.
5146  BlockAllocation m_BlockAllocation;
5147  // Allocation for an object that has its own private VkDeviceMemory.
5148  DedicatedAllocation m_DedicatedAllocation;
5149  };
5150 
5151 #if VMA_STATS_STRING_ENABLED
5152  uint32_t m_CreationFrameIndex;
5153  uint32_t m_BufferImageUsage; // 0 if unknown.
5154 #endif
5155 
5156  void FreeUserDataString(VmaAllocator hAllocator);
5157 };
5158 
5159 /*
5160 Represents a region of VmaDeviceMemoryBlock that is either free, or assigned and
5161 returned as an allocated memory block.
5162 */
5163 struct VmaSuballocation
5164 {
5165  VkDeviceSize offset;
5166  VkDeviceSize size;
5167  VmaAllocation hAllocation;
5168  VmaSuballocationType type;
5169 };
5170 
5171 // Comparator for offsets.
5172 struct VmaSuballocationOffsetLess
5173 {
5174  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5175  {
5176  return lhs.offset < rhs.offset;
5177  }
5178 };
5179 struct VmaSuballocationOffsetGreater
5180 {
5181  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5182  {
5183  return lhs.offset > rhs.offset;
5184  }
5185 };
5186 
5187 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5188 
5189 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
5190 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5191 
5192 enum class VmaAllocationRequestType
5193 {
5194  Normal,
5195  // Used by "Linear" algorithm.
5196  UpperAddress,
5197  EndOf1st,
5198  EndOf2nd,
5199 };
5200 
5201 /*
5202 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5203 
5204 If canMakeOtherLost was false:
5205 - item points to a FREE suballocation.
5206 - itemsToMakeLostCount is 0.
5207 
5208 If canMakeOtherLost was true:
5209 - item points to the first of a sequence of suballocations, each of which is either FREE
5210  or points to a VmaAllocation that can become lost.
5211 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5212  the requested allocation to succeed.
5213 */
5214 struct VmaAllocationRequest
5215 {
5216  VkDeviceSize offset;
5217  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5218  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5219  VmaSuballocationList::iterator item;
5220  size_t itemsToMakeLostCount;
5221  void* customData;
5222  VmaAllocationRequestType type;
5223 
5224  VkDeviceSize CalcCost() const
5225  {
5226  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5227  }
5228 };
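// Illustrative sketch only: when several candidate requests are found, the one
// with the lower CalcCost() - fewer bytes of live allocations sacrificed - is the
// better choice. The helper below is hypothetical.
#if 0
const VmaAllocationRequest& ChooseCheaperRequest(
    const VmaAllocationRequest& a,
    const VmaAllocationRequest& b)
{
    // With VMA_LOST_ALLOCATION_COST = 1048576, making one allocation lost weighs
    // as much as sacrificing 1 MiB of existing data.
    return a.CalcCost() <= b.CalcCost() ? a : b;
}
#endif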
5229 
5230 /*
5231 Data structure used for bookkeeping of allocations and unused ranges of memory
5232 in a single VkDeviceMemory block.
5233 */
5234 class VmaBlockMetadata
5235 {
5236 public:
5237  VmaBlockMetadata(VmaAllocator hAllocator);
5238  virtual ~VmaBlockMetadata() { }
5239  virtual void Init(VkDeviceSize size) { m_Size = size; }
5240 
5241  // Validates all data structures inside this object. If not valid, returns false.
5242  virtual bool Validate() const = 0;
5243  VkDeviceSize GetSize() const { return m_Size; }
5244  virtual size_t GetAllocationCount() const = 0;
5245  virtual VkDeviceSize GetSumFreeSize() const = 0;
5246  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5247  // Returns true if this block is empty - contains only a single free suballocation.
5248  virtual bool IsEmpty() const = 0;
5249 
5250  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5251  // Shouldn't modify blockCount.
5252  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5253 
5254 #if VMA_STATS_STRING_ENABLED
5255  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5256 #endif
5257 
5258  // Tries to find a place for suballocation with given parameters inside this block.
5259  // If succeeded, fills pAllocationRequest and returns true.
5260  // If failed, returns false.
5261  virtual bool CreateAllocationRequest(
5262  uint32_t currentFrameIndex,
5263  uint32_t frameInUseCount,
5264  VkDeviceSize bufferImageGranularity,
5265  VkDeviceSize allocSize,
5266  VkDeviceSize allocAlignment,
5267  bool upperAddress,
5268  VmaSuballocationType allocType,
5269  bool canMakeOtherLost,
5270  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5271  uint32_t strategy,
5272  VmaAllocationRequest* pAllocationRequest) = 0;
5273 
5274  virtual bool MakeRequestedAllocationsLost(
5275  uint32_t currentFrameIndex,
5276  uint32_t frameInUseCount,
5277  VmaAllocationRequest* pAllocationRequest) = 0;
5278 
5279  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5280 
5281  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5282 
5283  // Makes actual allocation based on request. Request must already be checked and valid.
5284  virtual void Alloc(
5285  const VmaAllocationRequest& request,
5286  VmaSuballocationType type,
5287  VkDeviceSize allocSize,
5288  VmaAllocation hAllocation) = 0;
5289 
5290  // Frees suballocation assigned to given memory region.
5291  virtual void Free(const VmaAllocation allocation) = 0;
5292  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5293 
5294  // Tries to resize (grow or shrink) space for given allocation, in place.
5295  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5296 
5297 protected:
5298  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5299 
5300 #if VMA_STATS_STRING_ENABLED
5301  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5302  VkDeviceSize unusedBytes,
5303  size_t allocationCount,
5304  size_t unusedRangeCount) const;
5305  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5306  VkDeviceSize offset,
5307  VmaAllocation hAllocation) const;
5308  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5309  VkDeviceSize offset,
5310  VkDeviceSize size) const;
5311  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5312 #endif
5313 
5314 private:
5315  VkDeviceSize m_Size;
5316  const VkAllocationCallbacks* m_pAllocationCallbacks;
5317 };
5318 
5319 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5320  VMA_ASSERT(0 && "Validation failed: " #cond); \
5321  return false; \
5322  } } while(false)
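// Illustrative sketch only of how VMA_VALIDATE is used inside a Validate()
// implementation: the first failed condition asserts and makes the whole
// function return false. The class and members below are hypothetical, modeled
// on VmaBlockMetadata_Generic.
#if 0
bool ExampleMetadata::Validate() const
{
    VMA_VALIDATE(m_SumFreeSize <= GetSize());
    VMA_VALIDATE(!m_Suballocations.empty());
    return true; // Reached only if every VMA_VALIDATE condition held.
}
#endif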
5323 
5324 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5325 {
5326  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5327 public:
5328  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5329  virtual ~VmaBlockMetadata_Generic();
5330  virtual void Init(VkDeviceSize size);
5331 
5332  virtual bool Validate() const;
5333  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5334  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5335  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5336  virtual bool IsEmpty() const;
5337 
5338  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5339  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5340 
5341 #if VMA_STATS_STRING_ENABLED
5342  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5343 #endif
5344 
5345  virtual bool CreateAllocationRequest(
5346  uint32_t currentFrameIndex,
5347  uint32_t frameInUseCount,
5348  VkDeviceSize bufferImageGranularity,
5349  VkDeviceSize allocSize,
5350  VkDeviceSize allocAlignment,
5351  bool upperAddress,
5352  VmaSuballocationType allocType,
5353  bool canMakeOtherLost,
5354  uint32_t strategy,
5355  VmaAllocationRequest* pAllocationRequest);
5356 
5357  virtual bool MakeRequestedAllocationsLost(
5358  uint32_t currentFrameIndex,
5359  uint32_t frameInUseCount,
5360  VmaAllocationRequest* pAllocationRequest);
5361 
5362  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5363 
5364  virtual VkResult CheckCorruption(const void* pBlockData);
5365 
5366  virtual void Alloc(
5367  const VmaAllocationRequest& request,
5368  VmaSuballocationType type,
5369  VkDeviceSize allocSize,
5370  VmaAllocation hAllocation);
5371 
5372  virtual void Free(const VmaAllocation allocation);
5373  virtual void FreeAtOffset(VkDeviceSize offset);
5374 
5375  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5376 
5378  // For defragmentation
5379 
5380  bool IsBufferImageGranularityConflictPossible(
5381  VkDeviceSize bufferImageGranularity,
5382  VmaSuballocationType& inOutPrevSuballocType) const;
5383 
5384 private:
5385  friend class VmaDefragmentationAlgorithm_Generic;
5386  friend class VmaDefragmentationAlgorithm_Fast;
5387 
5388  uint32_t m_FreeCount;
5389  VkDeviceSize m_SumFreeSize;
5390  VmaSuballocationList m_Suballocations;
5391  // Suballocations that are free and have size greater than a certain threshold.
5392  // Sorted by size, ascending.
5393  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5394 
5395  bool ValidateFreeSuballocationList() const;
5396 
5397  // Checks if a requested suballocation with the given parameters can be placed at given suballocItem.
5398  // If yes, fills pOffset and returns true. If no, returns false.
5399  bool CheckAllocation(
5400  uint32_t currentFrameIndex,
5401  uint32_t frameInUseCount,
5402  VkDeviceSize bufferImageGranularity,
5403  VkDeviceSize allocSize,
5404  VkDeviceSize allocAlignment,
5405  VmaSuballocationType allocType,
5406  VmaSuballocationList::const_iterator suballocItem,
5407  bool canMakeOtherLost,
5408  VkDeviceSize* pOffset,
5409  size_t* itemsToMakeLostCount,
5410  VkDeviceSize* pSumFreeSize,
5411  VkDeviceSize* pSumItemSize) const;
5412  // Given a free suballocation, merges it with the following one, which must also be free.
5413  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5414  // Releases given suballocation, making it free.
5415  // Merges it with adjacent free suballocations if applicable.
5416  // Returns iterator to new free suballocation at this place.
5417  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5418  // Given a free suballocation, inserts it into the sorted list
5419  // m_FreeSuballocationsBySize if it qualifies.
5420  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5421  // Given a free suballocation, removes it from the sorted list
5422  // m_FreeSuballocationsBySize if it is registered there.
5423  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5424 };
5425 
5426 /*
5427 Allocations and their references in internal data structure look like this:
5428 
5429 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5430 
5431  0 +-------+
5432  | |
5433  | |
5434  | |
5435  +-------+
5436  | Alloc | 1st[m_1stNullItemsBeginCount]
5437  +-------+
5438  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5439  +-------+
5440  | ... |
5441  +-------+
5442  | Alloc | 1st[1st.size() - 1]
5443  +-------+
5444  | |
5445  | |
5446  | |
5447 GetSize() +-------+
5448 
5449 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5450 
5451  0 +-------+
5452  | Alloc | 2nd[0]
5453  +-------+
5454  | Alloc | 2nd[1]
5455  +-------+
5456  | ... |
5457  +-------+
5458  | Alloc | 2nd[2nd.size() - 1]
5459  +-------+
5460  | |
5461  | |
5462  | |
5463  +-------+
5464  | Alloc | 1st[m_1stNullItemsBeginCount]
5465  +-------+
5466  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5467  +-------+
5468  | ... |
5469  +-------+
5470  | Alloc | 1st[1st.size() - 1]
5471  +-------+
5472  | |
5473 GetSize() +-------+
5474 
5475 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5476 
5477  0 +-------+
5478  | |
5479  | |
5480  | |
5481  +-------+
5482  | Alloc | 1st[m_1stNullItemsBeginCount]
5483  +-------+
5484  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5485  +-------+
5486  | ... |
5487  +-------+
5488  | Alloc | 1st[1st.size() - 1]
5489  +-------+
5490  | |
5491  | |
5492  | |
5493  +-------+
5494  | Alloc | 2nd[2nd.size() - 1]
5495  +-------+
5496  | ... |
5497  +-------+
5498  | Alloc | 2nd[1]
5499  +-------+
5500  | Alloc | 2nd[0]
5501 GetSize() +-------+
5502 
5503 */
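// Illustrative sketch only: a hypothetical public-API usage that exercises this
// metadata class. Freeing in FIFO order while allocating new data makes the block
// behave as the SECOND_VECTOR_RING_BUFFER diagram above.
#if 0
VmaPoolCreateInfo poolInfo = {};
poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
poolInfo.memoryTypeIndex = memTypeIndex;          // Chosen beforehand.
poolInfo.blockSize = 64ull * 1024 * 1024;         // One 64 MiB block.
poolInfo.maxBlockCount = 1;
VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
#endif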
5504 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5505 {
5506  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5507 public:
5508  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5509  virtual ~VmaBlockMetadata_Linear();
5510  virtual void Init(VkDeviceSize size);
5511 
5512  virtual bool Validate() const;
5513  virtual size_t GetAllocationCount() const;
5514  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5515  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5516  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5517 
5518  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5519  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5520 
5521 #if VMA_STATS_STRING_ENABLED
5522  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5523 #endif
5524 
5525  virtual bool CreateAllocationRequest(
5526  uint32_t currentFrameIndex,
5527  uint32_t frameInUseCount,
5528  VkDeviceSize bufferImageGranularity,
5529  VkDeviceSize allocSize,
5530  VkDeviceSize allocAlignment,
5531  bool upperAddress,
5532  VmaSuballocationType allocType,
5533  bool canMakeOtherLost,
5534  uint32_t strategy,
5535  VmaAllocationRequest* pAllocationRequest);
5536 
5537  virtual bool MakeRequestedAllocationsLost(
5538  uint32_t currentFrameIndex,
5539  uint32_t frameInUseCount,
5540  VmaAllocationRequest* pAllocationRequest);
5541 
5542  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5543 
5544  virtual VkResult CheckCorruption(const void* pBlockData);
5545 
5546  virtual void Alloc(
5547  const VmaAllocationRequest& request,
5548  VmaSuballocationType type,
5549  VkDeviceSize allocSize,
5550  VmaAllocation hAllocation);
5551 
5552  virtual void Free(const VmaAllocation allocation);
5553  virtual void FreeAtOffset(VkDeviceSize offset);
5554 
5555 private:
5556  /*
5557  There are two suballocation vectors, used in ping-pong way.
5558  The one with index m_1stVectorIndex is called 1st.
5559  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5560  2nd can be non-empty only when 1st is not empty.
5561  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5562  */
5563  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5564 
5565  enum SECOND_VECTOR_MODE
5566  {
5567  SECOND_VECTOR_EMPTY,
5568  /*
5569  Suballocations in the 2nd vector are created later than the ones in the 1st, but they
5570  all have smaller offsets.
5571  */
5572  SECOND_VECTOR_RING_BUFFER,
5573  /*
5574  Suballocations in the 2nd vector are the upper side of a double stack.
5575  They all have offsets higher than those in the 1st vector.
5576  Top of this stack means smaller offsets, but higher indices in this vector.
5577  */
5578  SECOND_VECTOR_DOUBLE_STACK,
5579  };
5580 
5581  VkDeviceSize m_SumFreeSize;
5582  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5583  uint32_t m_1stVectorIndex;
5584  SECOND_VECTOR_MODE m_2ndVectorMode;
5585 
5586  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5587  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5588  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5589  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5590 
5591  // Number of items in 1st vector with hAllocation = null at the beginning.
5592  size_t m_1stNullItemsBeginCount;
5593  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5594  size_t m_1stNullItemsMiddleCount;
5595  // Number of items in 2nd vector with hAllocation = null.
5596  size_t m_2ndNullItemsCount;
5597 
5598  bool ShouldCompact1st() const;
5599  void CleanupAfterFree();
5600 
5601  bool CreateAllocationRequest_LowerAddress(
5602  uint32_t currentFrameIndex,
5603  uint32_t frameInUseCount,
5604  VkDeviceSize bufferImageGranularity,
5605  VkDeviceSize allocSize,
5606  VkDeviceSize allocAlignment,
5607  VmaSuballocationType allocType,
5608  bool canMakeOtherLost,
5609  uint32_t strategy,
5610  VmaAllocationRequest* pAllocationRequest);
5611  bool CreateAllocationRequest_UpperAddress(
5612  uint32_t currentFrameIndex,
5613  uint32_t frameInUseCount,
5614  VkDeviceSize bufferImageGranularity,
5615  VkDeviceSize allocSize,
5616  VkDeviceSize allocAlignment,
5617  VmaSuballocationType allocType,
5618  bool canMakeOtherLost,
5619  uint32_t strategy,
5620  VmaAllocationRequest* pAllocationRequest);
5621 };
5622 
5623 /*
5624 - GetSize() is the original size of allocated memory block.
5625 - m_UsableSize is this size aligned down to a power of two.
5626  All allocations and calculations happen relative to m_UsableSize.
5627 - GetUnusableSize() is the difference between them.
5628  It is reported as a separate, unused range, not available for allocations.
5629 
5630 Node at level 0 has size = m_UsableSize.
5631 Each subsequent level contains nodes half the size of those in the level above.
5632 m_LevelCount is the maximum number of levels to use in the current object.
5633 */
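// Illustrative sketch only (hypothetical helper): how an allocation size maps to
// a level under the scheme described above, where level 0 spans m_UsableSize and
// each level halves the node size.
#if 0
uint32_t ExampleAllocSizeToLevel(VkDeviceSize usableSize, uint32_t levelCount, VkDeviceSize allocSize)
{
    uint32_t level = 0;
    // Descend while the child node size would still fit the requested size.
    while(level + 1 < levelCount && (usableSize >> (level + 1)) >= allocSize)
    {
        ++level;
    }
    return level; // Node size at this level is usableSize >> level.
}
#endif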
5634 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5635 {
5636  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5637 public:
5638  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5639  virtual ~VmaBlockMetadata_Buddy();
5640  virtual void Init(VkDeviceSize size);
5641 
5642  virtual bool Validate() const;
5643  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5644  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5645  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5646  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5647 
5648  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5649  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5650 
5651 #if VMA_STATS_STRING_ENABLED
5652  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5653 #endif
5654 
5655  virtual bool CreateAllocationRequest(
5656  uint32_t currentFrameIndex,
5657  uint32_t frameInUseCount,
5658  VkDeviceSize bufferImageGranularity,
5659  VkDeviceSize allocSize,
5660  VkDeviceSize allocAlignment,
5661  bool upperAddress,
5662  VmaSuballocationType allocType,
5663  bool canMakeOtherLost,
5664  uint32_t strategy,
5665  VmaAllocationRequest* pAllocationRequest);
5666 
5667  virtual bool MakeRequestedAllocationsLost(
5668  uint32_t currentFrameIndex,
5669  uint32_t frameInUseCount,
5670  VmaAllocationRequest* pAllocationRequest);
5671 
5672  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5673 
5674  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5675 
5676  virtual void Alloc(
5677  const VmaAllocationRequest& request,
5678  VmaSuballocationType type,
5679  VkDeviceSize allocSize,
5680  VmaAllocation hAllocation);
5681 
5682  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5683  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5684 
5685 private:
5686  static const VkDeviceSize MIN_NODE_SIZE = 32;
5687  static const size_t MAX_LEVELS = 30;
5688 
5689  struct ValidationContext
5690  {
5691  size_t calculatedAllocationCount;
5692  size_t calculatedFreeCount;
5693  VkDeviceSize calculatedSumFreeSize;
5694 
5695  ValidationContext() :
5696  calculatedAllocationCount(0),
5697  calculatedFreeCount(0),
5698  calculatedSumFreeSize(0) { }
5699  };
5700 
5701  struct Node
5702  {
5703  VkDeviceSize offset;
5704  enum TYPE
5705  {
5706  TYPE_FREE,
5707  TYPE_ALLOCATION,
5708  TYPE_SPLIT,
5709  TYPE_COUNT
5710  } type;
5711  Node* parent;
5712  Node* buddy;
5713 
5714  union
5715  {
5716  struct
5717  {
5718  Node* prev;
5719  Node* next;
5720  } free;
5721  struct
5722  {
5723  VmaAllocation alloc;
5724  } allocation;
5725  struct
5726  {
5727  Node* leftChild;
5728  } split;
5729  };
5730  };
5731 
5732  // Size of the memory block aligned down to a power of two.
5733  VkDeviceSize m_UsableSize;
5734  uint32_t m_LevelCount;
5735 
5736  Node* m_Root;
5737  struct {
5738  Node* front;
5739  Node* back;
5740  } m_FreeList[MAX_LEVELS];
5741  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5742  size_t m_AllocationCount;
5743  // Number of nodes in the tree with type == TYPE_FREE.
5744  size_t m_FreeCount;
5745  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5746  VkDeviceSize m_SumFreeSize;
5747 
5748  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5749  void DeleteNode(Node* node);
5750  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5751  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5752  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5753  // Alloc passed just for validation. Can be null.
5754  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5755  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5756  // Adds node to the front of FreeList at given level.
5757  // node->type must be FREE.
5758  // node->free.prev, next can be undefined.
5759  void AddToFreeListFront(uint32_t level, Node* node);
5760  // Removes node from FreeList at given level.
5761  // node->type must be FREE.
5762  // node->free.prev, next stay untouched.
5763  void RemoveFromFreeList(uint32_t level, Node* node);
5764 
5765 #if VMA_STATS_STRING_ENABLED
5766  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5767 #endif
5768 };
5769 
5770 /*
5771 Represents a single block of device memory (`VkDeviceMemory`) with all the
5772 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5773 
5774 Thread-safety: This class must be externally synchronized.
5775 */
5776 class VmaDeviceMemoryBlock
5777 {
5778  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5779 public:
5780  VmaBlockMetadata* m_pMetadata;
5781 
5782  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5783 
5784  ~VmaDeviceMemoryBlock()
5785  {
5786  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5787  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5788  }
5789 
5790  // Always call after construction.
5791  void Init(
5792  VmaAllocator hAllocator,
5793  VmaPool hParentPool,
5794  uint32_t newMemoryTypeIndex,
5795  VkDeviceMemory newMemory,
5796  VkDeviceSize newSize,
5797  uint32_t id,
5798  uint32_t algorithm);
5799  // Always call before destruction.
5800  void Destroy(VmaAllocator allocator);
5801 
5802  VmaPool GetParentPool() const { return m_hParentPool; }
5803  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5804  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5805  uint32_t GetId() const { return m_Id; }
5806  void* GetMappedData() const { return m_pMappedData; }
5807 
5808  // Validates all data structures inside this object. If not valid, returns false.
5809  bool Validate() const;
5810 
5811  VkResult CheckCorruption(VmaAllocator hAllocator);
5812 
5813  // ppData can be null.
5814  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5815  void Unmap(VmaAllocator hAllocator, uint32_t count);
5816 
5817  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5818  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5819 
5820  VkResult BindBufferMemory(
5821  const VmaAllocator hAllocator,
5822  const VmaAllocation hAllocation,
5823  VkBuffer hBuffer);
5824  VkResult BindImageMemory(
5825  const VmaAllocator hAllocator,
5826  const VmaAllocation hAllocation,
5827  VkImage hImage);
5828 
5829 private:
5830  VmaPool m_hParentPool; // VK_NULL_HANDLE if it does not belong to a custom pool.
5831  uint32_t m_MemoryTypeIndex;
5832  uint32_t m_Id;
5833  VkDeviceMemory m_hMemory;
5834 
5835  /*
5836  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5837  Also protects m_MapCount, m_pMappedData.
5838  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
5839  */
5840  VMA_MUTEX m_Mutex;
5841  uint32_t m_MapCount;
5842  void* m_pMappedData;
5843 };
5844 
5845 struct VmaPointerLess
5846 {
5847  bool operator()(const void* lhs, const void* rhs) const
5848  {
5849  return lhs < rhs;
5850  }
5851 };
5852 
5853 struct VmaDefragmentationMove
5854 {
5855  size_t srcBlockIndex;
5856  size_t dstBlockIndex;
5857  VkDeviceSize srcOffset;
5858  VkDeviceSize dstOffset;
5859  VkDeviceSize size;
5860 };
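// Illustrative sketch only: how a sequence of such moves could be applied on the
// CPU once every affected block is mapped (cf. ApplyDefragmentationMovesCpu below).
// pMappedBlockData is hypothetical; assumes <cstring> for memmove.
#if 0
void ApplyMovesExample(
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    char** pMappedBlockData) // Indexed by block index.
{
    for(size_t i = 0; i < moves.size(); ++i)
    {
        const VmaDefragmentationMove& move = moves[i];
        // memmove, not memcpy: source and destination may overlap within one block.
        memmove(
            pMappedBlockData[move.dstBlockIndex] + move.dstOffset,
            pMappedBlockData[move.srcBlockIndex] + move.srcOffset,
            (size_t)move.size);
    }
}
#endif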
5861 
5862 class VmaDefragmentationAlgorithm;
5863 
5864 /*
5865 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5866 Vulkan memory type.
5867 
5868 Synchronized internally with a mutex.
5869 */
5870 struct VmaBlockVector
5871 {
5872  VMA_CLASS_NO_COPY(VmaBlockVector)
5873 public:
5874  VmaBlockVector(
5875  VmaAllocator hAllocator,
5876  VmaPool hParentPool,
5877  uint32_t memoryTypeIndex,
5878  VkDeviceSize preferredBlockSize,
5879  size_t minBlockCount,
5880  size_t maxBlockCount,
5881  VkDeviceSize bufferImageGranularity,
5882  uint32_t frameInUseCount,
5883  bool isCustomPool,
5884  bool explicitBlockSize,
5885  uint32_t algorithm);
5886  ~VmaBlockVector();
5887 
5888  VkResult CreateMinBlocks();
5889 
5890  VmaPool GetParentPool() const { return m_hParentPool; }
5891  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5892  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5893  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5894  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5895  uint32_t GetAlgorithm() const { return m_Algorithm; }
5896 
5897  void GetPoolStats(VmaPoolStats* pStats);
5898 
5899  bool IsEmpty() const { return m_Blocks.empty(); }
5900  bool IsCorruptionDetectionEnabled() const;
5901 
5902  VkResult Allocate(
5903  uint32_t currentFrameIndex,
5904  VkDeviceSize size,
5905  VkDeviceSize alignment,
5906  const VmaAllocationCreateInfo& createInfo,
5907  VmaSuballocationType suballocType,
5908  size_t allocationCount,
5909  VmaAllocation* pAllocations);
5910 
5911  void Free(
5912  VmaAllocation hAllocation);
5913 
5914  // Adds statistics of this BlockVector to pStats.
5915  void AddStats(VmaStats* pStats);
5916 
5917 #if VMA_STATS_STRING_ENABLED
5918  void PrintDetailedMap(class VmaJsonWriter& json);
5919 #endif
5920 
5921  void MakePoolAllocationsLost(
5922  uint32_t currentFrameIndex,
5923  size_t* pLostAllocationCount);
5924  VkResult CheckCorruption();
5925 
5926  // Saves results in pCtx->res.
5927  void Defragment(
5928  class VmaBlockVectorDefragmentationContext* pCtx,
5929  VmaDefragmentationStats* pStats,
5930  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5931  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5932  VkCommandBuffer commandBuffer);
5933  void DefragmentationEnd(
5934  class VmaBlockVectorDefragmentationContext* pCtx,
5935  VmaDefragmentationStats* pStats);
5936 
5938  // To be used only while the m_Mutex is locked. Used during defragmentation.
5939 
5940  size_t GetBlockCount() const { return m_Blocks.size(); }
5941  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5942  size_t CalcAllocationCount() const;
5943  bool IsBufferImageGranularityConflictPossible() const;
5944 
5945 private:
5946  friend class VmaDefragmentationAlgorithm_Generic;
5947 
5948  const VmaAllocator m_hAllocator;
5949  const VmaPool m_hParentPool;
5950  const uint32_t m_MemoryTypeIndex;
5951  const VkDeviceSize m_PreferredBlockSize;
5952  const size_t m_MinBlockCount;
5953  const size_t m_MaxBlockCount;
5954  const VkDeviceSize m_BufferImageGranularity;
5955  const uint32_t m_FrameInUseCount;
5956  const bool m_IsCustomPool;
5957  const bool m_ExplicitBlockSize;
5958  const uint32_t m_Algorithm;
5959  /* There can be at most one block that is completely empty - a
5960  hysteresis to avoid the pessimistic case of alternating creation and destruction
5961  of a VkDeviceMemory. */
5962  bool m_HasEmptyBlock;
5963  VMA_RW_MUTEX m_Mutex;
5964  // Incrementally sorted by sumFreeSize, ascending.
5965  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5966  uint32_t m_NextBlockId;
5967 
5968  VkDeviceSize CalcMaxBlockSize() const;
5969 
5970  // Finds and removes given block from vector.
5971  void Remove(VmaDeviceMemoryBlock* pBlock);
5972 
5973  // Performs single step in sorting m_Blocks. They may not be fully sorted
5974  // after this call.
5975  void IncrementallySortBlocks();
5976 
5977  VkResult AllocatePage(
5978  uint32_t currentFrameIndex,
5979  VkDeviceSize size,
5980  VkDeviceSize alignment,
5981  const VmaAllocationCreateInfo& createInfo,
5982  VmaSuballocationType suballocType,
5983  VmaAllocation* pAllocation);
5984 
5985  // To be used only without CAN_MAKE_OTHER_LOST flag.
5986  VkResult AllocateFromBlock(
5987  VmaDeviceMemoryBlock* pBlock,
5988  uint32_t currentFrameIndex,
5989  VkDeviceSize size,
5990  VkDeviceSize alignment,
5991  VmaAllocationCreateFlags allocFlags,
5992  void* pUserData,
5993  VmaSuballocationType suballocType,
5994  uint32_t strategy,
5995  VmaAllocation* pAllocation);
5996 
5997  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5998 
5999  // Saves result to pCtx->res.
6000  void ApplyDefragmentationMovesCpu(
6001  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6002  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6003  // Saves result to pCtx->res.
6004  void ApplyDefragmentationMovesGpu(
6005  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6006  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6007  VkCommandBuffer commandBuffer);
6008 
6009  /*
6010  Used during defragmentation. pDefragmentationStats is optional. It's an in/out
6011  parameter - updated with new data.
6012  */
6013  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6014 };
6015 
6016 struct VmaPool_T
6017 {
6018  VMA_CLASS_NO_COPY(VmaPool_T)
6019 public:
6020  VmaBlockVector m_BlockVector;
6021 
6022  VmaPool_T(
6023  VmaAllocator hAllocator,
6024  const VmaPoolCreateInfo& createInfo,
6025  VkDeviceSize preferredBlockSize);
6026  ~VmaPool_T();
6027 
6028  uint32_t GetId() const { return m_Id; }
6029  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6030 
6031 #if VMA_STATS_STRING_ENABLED
6032  //void PrintDetailedMap(class VmaStringBuilder& sb);
6033 #endif
6034 
6035 private:
6036  uint32_t m_Id;
6037 };
6038 
6039 /*
6040 Performs defragmentation:
6041 
6042 - Updates `pBlockVector->m_pMetadata`.
6043 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6044 - Does not move actual data, only returns requested moves as `moves`.
6045 */
6046 class VmaDefragmentationAlgorithm
6047 {
6048  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6049 public:
6050  VmaDefragmentationAlgorithm(
6051  VmaAllocator hAllocator,
6052  VmaBlockVector* pBlockVector,
6053  uint32_t currentFrameIndex) :
6054  m_hAllocator(hAllocator),
6055  m_pBlockVector(pBlockVector),
6056  m_CurrentFrameIndex(currentFrameIndex)
6057  {
6058  }
6059  virtual ~VmaDefragmentationAlgorithm()
6060  {
6061  }
6062 
6063  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6064  virtual void AddAll() = 0;
6065 
6066  virtual VkResult Defragment(
6067  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6068  VkDeviceSize maxBytesToMove,
6069  uint32_t maxAllocationsToMove) = 0;
6070 
6071  virtual VkDeviceSize GetBytesMoved() const = 0;
6072  virtual uint32_t GetAllocationsMoved() const = 0;
6073 
6074 protected:
6075  VmaAllocator const m_hAllocator;
6076  VmaBlockVector* const m_pBlockVector;
6077  const uint32_t m_CurrentFrameIndex;
6078 
6079  struct AllocationInfo
6080  {
6081  VmaAllocation m_hAllocation;
6082  VkBool32* m_pChanged;
6083 
6084  AllocationInfo() :
6085  m_hAllocation(VK_NULL_HANDLE),
6086  m_pChanged(VMA_NULL)
6087  {
6088  }
6089  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6090  m_hAllocation(hAlloc),
6091  m_pChanged(pChanged)
6092  {
6093  }
6094  };
6095 };
6096 
6097 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6098 {
6099  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6100 public:
6101  VmaDefragmentationAlgorithm_Generic(
6102  VmaAllocator hAllocator,
6103  VmaBlockVector* pBlockVector,
6104  uint32_t currentFrameIndex,
6105  bool overlappingMoveSupported);
6106  virtual ~VmaDefragmentationAlgorithm_Generic();
6107 
6108  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6109  virtual void AddAll() { m_AllAllocations = true; }
6110 
6111  virtual VkResult Defragment(
6112  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6113  VkDeviceSize maxBytesToMove,
6114  uint32_t maxAllocationsToMove);
6115 
6116  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6117  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6118 
6119 private:
6120  uint32_t m_AllocationCount;
6121  bool m_AllAllocations;
6122 
6123  VkDeviceSize m_BytesMoved;
6124  uint32_t m_AllocationsMoved;
6125 
6126  struct AllocationInfoSizeGreater
6127  {
6128  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6129  {
6130  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6131  }
6132  };
6133 
6134  struct AllocationInfoOffsetGreater
6135  {
6136  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6137  {
6138  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6139  }
6140  };
6141 
6142  struct BlockInfo
6143  {
6144  size_t m_OriginalBlockIndex;
6145  VmaDeviceMemoryBlock* m_pBlock;
6146  bool m_HasNonMovableAllocations;
6147  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6148 
6149  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6150  m_OriginalBlockIndex(SIZE_MAX),
6151  m_pBlock(VMA_NULL),
6152  m_HasNonMovableAllocations(true),
6153  m_Allocations(pAllocationCallbacks)
6154  {
6155  }
6156 
6157  void CalcHasNonMovableAllocations()
6158  {
6159  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6160  const size_t defragmentAllocCount = m_Allocations.size();
6161  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6162  }
6163 
6164  void SortAllocationsBySizeDescending()
6165  {
6166  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6167  }
6168 
6169  void SortAllocationsByOffsetDescending()
6170  {
6171  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6172  }
6173  };
6174 
6175  struct BlockPointerLess
6176  {
6177  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6178  {
6179  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6180  }
6181  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6182  {
6183  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6184  }
6185  };
6186 
6187  // 1. Blocks with some non-movable allocations go first.
6188  // 2. Blocks with smaller sumFreeSize go first.
6189  struct BlockInfoCompareMoveDestination
6190  {
6191  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6192  {
6193  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6194  {
6195  return true;
6196  }
6197  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6198  {
6199  return false;
6200  }
6201  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6202  {
6203  return true;
6204  }
6205  return false;
6206  }
6207  };
6208 
6209  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6210  BlockInfoVector m_Blocks;
6211 
6212  VkResult DefragmentRound(
6213  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6214  VkDeviceSize maxBytesToMove,
6215  uint32_t maxAllocationsToMove);
6216 
6217  size_t CalcBlocksWithNonMovableCount() const;
6218 
6219  static bool MoveMakesSense(
6220  size_t dstBlockIndex, VkDeviceSize dstOffset,
6221  size_t srcBlockIndex, VkDeviceSize srcOffset);
6222 };
6223 
6224 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6225 {
6226  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6227 public:
6228  VmaDefragmentationAlgorithm_Fast(
6229  VmaAllocator hAllocator,
6230  VmaBlockVector* pBlockVector,
6231  uint32_t currentFrameIndex,
6232  bool overlappingMoveSupported);
6233  virtual ~VmaDefragmentationAlgorithm_Fast();
6234 
6235  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6236  virtual void AddAll() { m_AllAllocations = true; }
6237 
6238  virtual VkResult Defragment(
6239  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6240  VkDeviceSize maxBytesToMove,
6241  uint32_t maxAllocationsToMove);
6242 
6243  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6244  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6245 
6246 private:
6247  struct BlockInfo
6248  {
6249  size_t origBlockIndex;
6250  };
6251 
6252  class FreeSpaceDatabase
6253  {
6254  public:
6255  FreeSpaceDatabase()
6256  {
6257  FreeSpace s = {};
6258  s.blockInfoIndex = SIZE_MAX;
6259  for(size_t i = 0; i < MAX_COUNT; ++i)
6260  {
6261  m_FreeSpaces[i] = s;
6262  }
6263  }
6264 
6265  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6266  {
6267  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6268  {
6269  return;
6270  }
6271 
6272  // Find the first empty slot, or else the smallest slot that the new free space can replace.
6273  size_t bestIndex = SIZE_MAX;
6274  for(size_t i = 0; i < MAX_COUNT; ++i)
6275  {
6276  // Empty structure.
6277  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6278  {
6279  bestIndex = i;
6280  break;
6281  }
6282  if(m_FreeSpaces[i].size < size &&
6283  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6284  {
6285  bestIndex = i;
6286  }
6287  }
6288 
6289  if(bestIndex != SIZE_MAX)
6290  {
6291  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6292  m_FreeSpaces[bestIndex].offset = offset;
6293  m_FreeSpaces[bestIndex].size = size;
6294  }
6295  }
6296 
6297  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6298  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6299  {
6300  size_t bestIndex = SIZE_MAX;
6301  VkDeviceSize bestFreeSpaceAfter = 0;
6302  for(size_t i = 0; i < MAX_COUNT; ++i)
6303  {
6304  // Structure is valid.
6305  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6306  {
6307  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6308  // Allocation fits into this structure.
6309  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6310  {
6311  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6312  (dstOffset + size);
6313  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6314  {
6315  bestIndex = i;
6316  bestFreeSpaceAfter = freeSpaceAfter;
6317  }
6318  }
6319  }
6320  }
6321 
6322  if(bestIndex != SIZE_MAX)
6323  {
6324  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6325  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6326 
6327  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6328  {
6329  // Leave this structure for remaining empty space.
6330  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6331  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6332  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6333  }
6334  else
6335  {
6336  // This structure becomes invalid.
6337  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6338  }
6339 
6340  return true;
6341  }
6342 
6343  return false;
6344  }
6345 
6346  private:
6347  static const size_t MAX_COUNT = 4;
6348 
6349  struct FreeSpace
6350  {
6351  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6352  VkDeviceSize offset;
6353  VkDeviceSize size;
6354  } m_FreeSpaces[MAX_COUNT];
6355  };
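    // Worked example of the database above (values illustrative): after
    // Register(blockInfoIndex = 0, offset = 100, size = 500), a call to
    // Fetch(alignment = 64, size = 200) aligns 100 up to dstOffset = 128,
    // verifies 128 + 200 <= 600, and returns block 0 at offset 128. The slot is
    // then shrunk to the remaining tail: offset = 328, size = 272 (kept only if
    // that is still at least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER).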
6356 
6357  const bool m_OverlappingMoveSupported;
6358 
6359  uint32_t m_AllocationCount;
6360  bool m_AllAllocations;
6361 
6362  VkDeviceSize m_BytesMoved;
6363  uint32_t m_AllocationsMoved;
6364 
6365  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6366 
6367  void PreprocessMetadata();
6368  void PostprocessMetadata();
6369  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6370 };
6371 
6372 struct VmaBlockDefragmentationContext
6373 {
6374  enum BLOCK_FLAG
6375  {
6376  BLOCK_FLAG_USED = 0x00000001,
6377  };
6378  uint32_t flags;
6379  VkBuffer hBuffer;
6380 
6381  VmaBlockDefragmentationContext() :
6382  flags(0),
6383  hBuffer(VK_NULL_HANDLE)
6384  {
6385  }
6386 };
6387 
6388 class VmaBlockVectorDefragmentationContext
6389 {
6390  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6391 public:
6392  VkResult res;
6393  bool mutexLocked;
6394  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6395 
6396  VmaBlockVectorDefragmentationContext(
6397  VmaAllocator hAllocator,
6398  VmaPool hCustomPool, // Optional.
6399  VmaBlockVector* pBlockVector,
6400  uint32_t currFrameIndex,
6401  uint32_t flags);
6402  ~VmaBlockVectorDefragmentationContext();
6403 
6404  VmaPool GetCustomPool() const { return m_hCustomPool; }
6405  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6406  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6407 
6408  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6409  void AddAll() { m_AllAllocations = true; }
6410 
6411  void Begin(bool overlappingMoveSupported);
6412 
6413 private:
6414  const VmaAllocator m_hAllocator;
6415  // Null if not from custom pool.
6416  const VmaPool m_hCustomPool;
6417  // Redundant, stored for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6418  VmaBlockVector* const m_pBlockVector;
6419  const uint32_t m_CurrFrameIndex;
6420  const uint32_t m_AlgorithmFlags;
6421  // Owner of this object.
6422  VmaDefragmentationAlgorithm* m_pAlgorithm;
6423 
6424  struct AllocInfo
6425  {
6426  VmaAllocation hAlloc;
6427  VkBool32* pChanged;
6428  };
6429  // Used between constructor and Begin.
6430  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6431  bool m_AllAllocations;
6432 };
6433 
6434 struct VmaDefragmentationContext_T
6435 {
6436 private:
6437  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6438 public:
6439  VmaDefragmentationContext_T(
6440  VmaAllocator hAllocator,
6441  uint32_t currFrameIndex,
6442  uint32_t flags,
6443  VmaDefragmentationStats* pStats);
6444  ~VmaDefragmentationContext_T();
6445 
6446  void AddPools(uint32_t poolCount, VmaPool* pPools);
6447  void AddAllocations(
6448  uint32_t allocationCount,
6449  VmaAllocation* pAllocations,
6450  VkBool32* pAllocationsChanged);
6451 
6452  /*
6453  Returns:
6454  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6455  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6456  - Negative value if error occurred and object can be destroyed immediately.
6457  */
6458  VkResult Defragment(
6459  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6460  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6461  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6462 
6463 private:
6464  const VmaAllocator m_hAllocator;
6465  const uint32_t m_CurrFrameIndex;
6466  const uint32_t m_Flags;
6467  VmaDefragmentationStats* const m_pStats;
6468  // Owner of these objects.
6469  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6470  // Owner of these objects.
6471  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6472 };
6473 
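/*
Illustrative sketch (not part of the original source): how the public API
typically drives this context before reaching Defragment() above. Field names
follow VmaDefragmentationInfo2; the variables and limits are hypothetical and
error handling is omitted.

    VmaDefragmentationInfo2 info = {};
    info.allocationCount = allocCount;     // allocations bound to buffers only
    info.pAllocations = allocs;
    info.pAllocationsChanged = changed;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
    // VK_NOT_READY means work remains pending until vmaDefragmentationEnd().
    vmaDefragmentationEnd(allocator, ctx);
*/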
6474 #if VMA_RECORDING_ENABLED
6475 
6476 class VmaRecorder
6477 {
6478 public:
6479  VmaRecorder();
6480  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6481  void WriteConfiguration(
6482  const VkPhysicalDeviceProperties& devProps,
6483  const VkPhysicalDeviceMemoryProperties& memProps,
6484  bool dedicatedAllocationExtensionEnabled);
6485  ~VmaRecorder();
6486 
6487  void RecordCreateAllocator(uint32_t frameIndex);
6488  void RecordDestroyAllocator(uint32_t frameIndex);
6489  void RecordCreatePool(uint32_t frameIndex,
6490  const VmaPoolCreateInfo& createInfo,
6491  VmaPool pool);
6492  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6493  void RecordAllocateMemory(uint32_t frameIndex,
6494  const VkMemoryRequirements& vkMemReq,
6495  const VmaAllocationCreateInfo& createInfo,
6496  VmaAllocation allocation);
6497  void RecordAllocateMemoryPages(uint32_t frameIndex,
6498  const VkMemoryRequirements& vkMemReq,
6499  const VmaAllocationCreateInfo& createInfo,
6500  uint64_t allocationCount,
6501  const VmaAllocation* pAllocations);
6502  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6503  const VkMemoryRequirements& vkMemReq,
6504  bool requiresDedicatedAllocation,
6505  bool prefersDedicatedAllocation,
6506  const VmaAllocationCreateInfo& createInfo,
6507  VmaAllocation allocation);
6508  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6509  const VkMemoryRequirements& vkMemReq,
6510  bool requiresDedicatedAllocation,
6511  bool prefersDedicatedAllocation,
6512  const VmaAllocationCreateInfo& createInfo,
6513  VmaAllocation allocation);
6514  void RecordFreeMemory(uint32_t frameIndex,
6515  VmaAllocation allocation);
6516  void RecordFreeMemoryPages(uint32_t frameIndex,
6517  uint64_t allocationCount,
6518  const VmaAllocation* pAllocations);
6519  void RecordResizeAllocation(
6520  uint32_t frameIndex,
6521  VmaAllocation allocation,
6522  VkDeviceSize newSize);
6523  void RecordSetAllocationUserData(uint32_t frameIndex,
6524  VmaAllocation allocation,
6525  const void* pUserData);
6526  void RecordCreateLostAllocation(uint32_t frameIndex,
6527  VmaAllocation allocation);
6528  void RecordMapMemory(uint32_t frameIndex,
6529  VmaAllocation allocation);
6530  void RecordUnmapMemory(uint32_t frameIndex,
6531  VmaAllocation allocation);
6532  void RecordFlushAllocation(uint32_t frameIndex,
6533  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6534  void RecordInvalidateAllocation(uint32_t frameIndex,
6535  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6536  void RecordCreateBuffer(uint32_t frameIndex,
6537  const VkBufferCreateInfo& bufCreateInfo,
6538  const VmaAllocationCreateInfo& allocCreateInfo,
6539  VmaAllocation allocation);
6540  void RecordCreateImage(uint32_t frameIndex,
6541  const VkImageCreateInfo& imageCreateInfo,
6542  const VmaAllocationCreateInfo& allocCreateInfo,
6543  VmaAllocation allocation);
6544  void RecordDestroyBuffer(uint32_t frameIndex,
6545  VmaAllocation allocation);
6546  void RecordDestroyImage(uint32_t frameIndex,
6547  VmaAllocation allocation);
6548  void RecordTouchAllocation(uint32_t frameIndex,
6549  VmaAllocation allocation);
6550  void RecordGetAllocationInfo(uint32_t frameIndex,
6551  VmaAllocation allocation);
6552  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6553  VmaPool pool);
6554  void RecordDefragmentationBegin(uint32_t frameIndex,
6555  const VmaDefragmentationInfo2& info,
6556  VmaDefragmentationContext ctx);
6557  void RecordDefragmentationEnd(uint32_t frameIndex,
6558  VmaDefragmentationContext ctx);
6559 
6560 private:
6561  struct CallParams
6562  {
6563  uint32_t threadId;
6564  double time;
6565  };
6566 
6567  class UserDataString
6568  {
6569  public:
6570  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6571  const char* GetString() const { return m_Str; }
6572 
6573  private:
6574  char m_PtrStr[17];
6575  const char* m_Str;
6576  };
6577 
6578  bool m_UseMutex;
6579  VmaRecordFlags m_Flags;
6580  FILE* m_File;
6581  VMA_MUTEX m_FileMutex;
6582  int64_t m_Freq;
6583  int64_t m_StartCounter;
6584 
6585  void GetBasicParams(CallParams& outParams);
6586 
6587  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6588  template<typename T>
6589  void PrintPointerList(uint64_t count, const T* pItems)
6590  {
6591  if(count)
6592  {
6593  fprintf(m_File, "%p", pItems[0]);
6594  for(uint64_t i = 1; i < count; ++i)
6595  {
6596  fprintf(m_File, " %p", pItems[i]);
6597  }
6598  }
6599  }
6600 
6601  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6602  void Flush();
6603 };
6604 
6605 #endif // #if VMA_RECORDING_ENABLED
6606 
6607 /*
6608 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6609 */
6610 class VmaAllocationObjectAllocator
6611 {
6612  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6613 public:
6614  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6615 
6616  VmaAllocation Allocate();
6617  void Free(VmaAllocation hAlloc);
6618 
6619 private:
6620  VMA_MUTEX m_Mutex;
6621  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
6622 };
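// Note: routing every VmaAllocation_T through this object means a single mutex
// guards the underlying VmaPoolAllocator free list, so Allocate() and Free()
// are safe to call from multiple threads concurrently.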
6623 
6624 // Main allocator object.
6625 struct VmaAllocator_T
6626 {
6627  VMA_CLASS_NO_COPY(VmaAllocator_T)
6628 public:
6629  bool m_UseMutex;
6630  bool m_UseKhrDedicatedAllocation;
6631  VkDevice m_hDevice;
6632  bool m_AllocationCallbacksSpecified;
6633  VkAllocationCallbacks m_AllocationCallbacks;
6634  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6635  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
6636 
6637  // Number of bytes free out of the limit for that heap, or VK_WHOLE_SIZE if that heap has no limit.
6638  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6639  VMA_MUTEX m_HeapSizeLimitMutex;
6640 
6641  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6642  VkPhysicalDeviceMemoryProperties m_MemProps;
6643 
6644  // Default pools.
6645  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6646 
6647  // Each vector is sorted by memory (handle value).
6648  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6649  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6650  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6651 
6652  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6653  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6654  ~VmaAllocator_T();
6655 
6656  const VkAllocationCallbacks* GetAllocationCallbacks() const
6657  {
6658  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6659  }
6660  const VmaVulkanFunctions& GetVulkanFunctions() const
6661  {
6662  return m_VulkanFunctions;
6663  }
6664 
6665  VkDeviceSize GetBufferImageGranularity() const
6666  {
6667  return VMA_MAX(
6668  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6669  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6670  }
6671 
6672  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6673  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6674 
6675  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6676  {
6677  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6678  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6679  }
6680  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6681  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6682  {
6683  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6684  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6685  }
6686  // Minimum alignment for all allocations in specific memory type.
6687  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6688  {
6689  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6690  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6691  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6692  }
6693 
6694  bool IsIntegratedGpu() const
6695  {
6696  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6697  }
6698 
6699 #if VMA_RECORDING_ENABLED
6700  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6701 #endif
6702 
6703  void GetBufferMemoryRequirements(
6704  VkBuffer hBuffer,
6705  VkMemoryRequirements& memReq,
6706  bool& requiresDedicatedAllocation,
6707  bool& prefersDedicatedAllocation) const;
6708  void GetImageMemoryRequirements(
6709  VkImage hImage,
6710  VkMemoryRequirements& memReq,
6711  bool& requiresDedicatedAllocation,
6712  bool& prefersDedicatedAllocation) const;
6713 
6714  // Main allocation function.
6715  VkResult AllocateMemory(
6716  const VkMemoryRequirements& vkMemReq,
6717  bool requiresDedicatedAllocation,
6718  bool prefersDedicatedAllocation,
6719  VkBuffer dedicatedBuffer,
6720  VkImage dedicatedImage,
6721  const VmaAllocationCreateInfo& createInfo,
6722  VmaSuballocationType suballocType,
6723  size_t allocationCount,
6724  VmaAllocation* pAllocations);
6725 
6726  // Main deallocation function.
6727  void FreeMemory(
6728  size_t allocationCount,
6729  const VmaAllocation* pAllocations);
6730 
6731  VkResult ResizeAllocation(
6732  const VmaAllocation alloc,
6733  VkDeviceSize newSize);
6734 
6735  void CalculateStats(VmaStats* pStats);
6736 
6737 #if VMA_STATS_STRING_ENABLED
6738  void PrintDetailedMap(class VmaJsonWriter& json);
6739 #endif
6740 
6741  VkResult DefragmentationBegin(
6742  const VmaDefragmentationInfo2& info,
6743  VmaDefragmentationStats* pStats,
6744  VmaDefragmentationContext* pContext);
6745  VkResult DefragmentationEnd(
6746  VmaDefragmentationContext context);
6747 
6748  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6749  bool TouchAllocation(VmaAllocation hAllocation);
6750 
6751  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6752  void DestroyPool(VmaPool pool);
6753  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6754 
6755  void SetCurrentFrameIndex(uint32_t frameIndex);
6756  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6757 
6758  void MakePoolAllocationsLost(
6759  VmaPool hPool,
6760  size_t* pLostAllocationCount);
6761  VkResult CheckPoolCorruption(VmaPool hPool);
6762  VkResult CheckCorruption(uint32_t memoryTypeBits);
6763 
6764  void CreateLostAllocation(VmaAllocation* pAllocation);
6765 
6766  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6767  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6768 
6769  VkResult Map(VmaAllocation hAllocation, void** ppData);
6770  void Unmap(VmaAllocation hAllocation);
6771 
6772  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6773  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6774 
6775  void FlushOrInvalidateAllocation(
6776  VmaAllocation hAllocation,
6777  VkDeviceSize offset, VkDeviceSize size,
6778  VMA_CACHE_OPERATION op);
6779 
6780  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6781 
6782 private:
6783  VkDeviceSize m_PreferredLargeHeapBlockSize;
6784 
6785  VkPhysicalDevice m_PhysicalDevice;
6786  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6787 
6788  VMA_RW_MUTEX m_PoolsMutex;
6789  // Protected by m_PoolsMutex. Sorted by pointer value.
6790  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6791  uint32_t m_NextPoolId;
6792 
6793  VmaVulkanFunctions m_VulkanFunctions;
6794 
6795 #if VMA_RECORDING_ENABLED
6796  VmaRecorder* m_pRecorder;
6797 #endif
6798 
6799  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6800 
6801  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6802 
6803  VkResult AllocateMemoryOfType(
6804  VkDeviceSize size,
6805  VkDeviceSize alignment,
6806  bool dedicatedAllocation,
6807  VkBuffer dedicatedBuffer,
6808  VkImage dedicatedImage,
6809  const VmaAllocationCreateInfo& createInfo,
6810  uint32_t memTypeIndex,
6811  VmaSuballocationType suballocType,
6812  size_t allocationCount,
6813  VmaAllocation* pAllocations);
6814 
6815  // Helper function only to be used inside AllocateDedicatedMemory.
6816  VkResult AllocateDedicatedMemoryPage(
6817  VkDeviceSize size,
6818  VmaSuballocationType suballocType,
6819  uint32_t memTypeIndex,
6820  const VkMemoryAllocateInfo& allocInfo,
6821  bool map,
6822  bool isUserDataString,
6823  void* pUserData,
6824  VmaAllocation* pAllocation);
6825 
6826  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6827  VkResult AllocateDedicatedMemory(
6828  VkDeviceSize size,
6829  VmaSuballocationType suballocType,
6830  uint32_t memTypeIndex,
6831  bool map,
6832  bool isUserDataString,
6833  void* pUserData,
6834  VkBuffer dedicatedBuffer,
6835  VkImage dedicatedImage,
6836  size_t allocationCount,
6837  VmaAllocation* pAllocations);
6838 
6839  // Frees memory of the given allocation, which must be a dedicated allocation.
6840  void FreeDedicatedMemory(VmaAllocation allocation);
6841 };
6842 
6843 ////////////////////////////////////////////////////////////////////////////////
6844 // Memory allocation #2 after VmaAllocator_T definition
6845 
6846 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6847 {
6848  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6849 }
6850 
6851 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6852 {
6853  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6854 }
6855 
6856 template<typename T>
6857 static T* VmaAllocate(VmaAllocator hAllocator)
6858 {
6859  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6860 }
6861 
6862 template<typename T>
6863 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6864 {
6865  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6866 }
6867 
6868 template<typename T>
6869 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6870 {
6871  if(ptr != VMA_NULL)
6872  {
6873  ptr->~T();
6874  VmaFree(hAllocator, ptr);
6875  }
6876 }
6877 
6878 template<typename T>
6879 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6880 {
6881  if(ptr != VMA_NULL)
6882  {
6883  for(size_t i = count; i--; )
6884  ptr[i].~T();
6885  VmaFree(hAllocator, ptr);
6886  }
6887 }
6888 
6889 ////////////////////////////////////////////////////////////////////////////////
6890 // VmaStringBuilder
6891 
6892 #if VMA_STATS_STRING_ENABLED
6893 
6894 class VmaStringBuilder
6895 {
6896 public:
6897  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6898  size_t GetLength() const { return m_Data.size(); }
6899  const char* GetData() const { return m_Data.data(); }
6900 
6901  void Add(char ch) { m_Data.push_back(ch); }
6902  void Add(const char* pStr);
6903  void AddNewLine() { Add('\n'); }
6904  void AddNumber(uint32_t num);
6905  void AddNumber(uint64_t num);
6906  void AddPointer(const void* ptr);
6907 
6908 private:
6909  VmaVector< char, VmaStlAllocator<char> > m_Data;
6910 };
6911 
6912 void VmaStringBuilder::Add(const char* pStr)
6913 {
6914  const size_t strLen = strlen(pStr);
6915  if(strLen > 0)
6916  {
6917  const size_t oldCount = m_Data.size();
6918  m_Data.resize(oldCount + strLen);
6919  memcpy(m_Data.data() + oldCount, pStr, strLen);
6920  }
6921 }
6922 
6923 void VmaStringBuilder::AddNumber(uint32_t num)
6924 {
6925  char buf[11];
6926  VmaUint32ToStr(buf, sizeof(buf), num);
6927  Add(buf);
6928 }
6929 
6930 void VmaStringBuilder::AddNumber(uint64_t num)
6931 {
6932  char buf[21];
6933  VmaUint64ToStr(buf, sizeof(buf), num);
6934  Add(buf);
6935 }
6936 
6937 void VmaStringBuilder::AddPointer(const void* ptr)
6938 {
6939  char buf[21];
6940  VmaPtrToStr(buf, sizeof(buf), ptr);
6941  Add(buf);
6942 }
6943 
6944 #endif // #if VMA_STATS_STRING_ENABLED
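// Illustrative sketch (not part of the original source): composing a line of
// output. 'hAllocator' and 'allocCount' are hypothetical.
//
//   VmaStringBuilder sb(hAllocator);
//   sb.Add("Allocations: ");
//   sb.AddNumber((uint32_t)allocCount);
//   sb.AddNewLine();
//   fwrite(sb.GetData(), 1, sb.GetLength(), stdout);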
6945 
6946 ////////////////////////////////////////////////////////////////////////////////
6947 // VmaJsonWriter
6948 
6949 #if VMA_STATS_STRING_ENABLED
6950 
6951 class VmaJsonWriter
6952 {
6953  VMA_CLASS_NO_COPY(VmaJsonWriter)
6954 public:
6955  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6956  ~VmaJsonWriter();
6957 
6958  void BeginObject(bool singleLine = false);
6959  void EndObject();
6960 
6961  void BeginArray(bool singleLine = false);
6962  void EndArray();
6963 
6964  void WriteString(const char* pStr);
6965  void BeginString(const char* pStr = VMA_NULL);
6966  void ContinueString(const char* pStr);
6967  void ContinueString(uint32_t n);
6968  void ContinueString(uint64_t n);
6969  void ContinueString_Pointer(const void* ptr);
6970  void EndString(const char* pStr = VMA_NULL);
6971 
6972  void WriteNumber(uint32_t n);
6973  void WriteNumber(uint64_t n);
6974  void WriteBool(bool b);
6975  void WriteNull();
6976 
6977 private:
6978  static const char* const INDENT;
6979 
6980  enum COLLECTION_TYPE
6981  {
6982  COLLECTION_TYPE_OBJECT,
6983  COLLECTION_TYPE_ARRAY,
6984  };
6985  struct StackItem
6986  {
6987  COLLECTION_TYPE type;
6988  uint32_t valueCount;
6989  bool singleLineMode;
6990  };
6991 
6992  VmaStringBuilder& m_SB;
6993  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6994  bool m_InsideString;
6995 
6996  void BeginValue(bool isString);
6997  void WriteIndent(bool oneLess = false);
6998 };
6999 
7000 const char* const VmaJsonWriter::INDENT = " ";
7001 
7002 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7003  m_SB(sb),
7004  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7005  m_InsideString(false)
7006 {
7007 }
7008 
7009 VmaJsonWriter::~VmaJsonWriter()
7010 {
7011  VMA_ASSERT(!m_InsideString);
7012  VMA_ASSERT(m_Stack.empty());
7013 }
7014 
7015 void VmaJsonWriter::BeginObject(bool singleLine)
7016 {
7017  VMA_ASSERT(!m_InsideString);
7018 
7019  BeginValue(false);
7020  m_SB.Add('{');
7021 
7022  StackItem item;
7023  item.type = COLLECTION_TYPE_OBJECT;
7024  item.valueCount = 0;
7025  item.singleLineMode = singleLine;
7026  m_Stack.push_back(item);
7027 }
7028 
7029 void VmaJsonWriter::EndObject()
7030 {
7031  VMA_ASSERT(!m_InsideString);
7032 
7033  WriteIndent(true);
7034  m_SB.Add('}');
7035 
7036  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7037  m_Stack.pop_back();
7038 }
7039 
7040 void VmaJsonWriter::BeginArray(bool singleLine)
7041 {
7042  VMA_ASSERT(!m_InsideString);
7043 
7044  BeginValue(false);
7045  m_SB.Add('[');
7046 
7047  StackItem item;
7048  item.type = COLLECTION_TYPE_ARRAY;
7049  item.valueCount = 0;
7050  item.singleLineMode = singleLine;
7051  m_Stack.push_back(item);
7052 }
7053 
7054 void VmaJsonWriter::EndArray()
7055 {
7056  VMA_ASSERT(!m_InsideString);
7057 
7058  WriteIndent(true);
7059  m_SB.Add(']');
7060 
7061  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7062  m_Stack.pop_back();
7063 }
7064 
7065 void VmaJsonWriter::WriteString(const char* pStr)
7066 {
7067  BeginString(pStr);
7068  EndString();
7069 }
7070 
7071 void VmaJsonWriter::BeginString(const char* pStr)
7072 {
7073  VMA_ASSERT(!m_InsideString);
7074 
7075  BeginValue(true);
7076  m_SB.Add('"');
7077  m_InsideString = true;
7078  if(pStr != VMA_NULL && pStr[0] != '\0')
7079  {
7080  ContinueString(pStr);
7081  }
7082 }
7083 
7084 void VmaJsonWriter::ContinueString(const char* pStr)
7085 {
7086  VMA_ASSERT(m_InsideString);
7087 
7088  const size_t strLen = strlen(pStr);
7089  for(size_t i = 0; i < strLen; ++i)
7090  {
7091  char ch = pStr[i];
7092  if(ch == '\\')
7093  {
7094  m_SB.Add("\\\\");
7095  }
7096  else if(ch == '"')
7097  {
7098  m_SB.Add("\\\"");
7099  }
7100  else if(ch >= 32)
7101  {
7102  m_SB.Add(ch);
7103  }
7104  else switch(ch)
7105  {
7106  case '\b':
7107  m_SB.Add("\\b");
7108  break;
7109  case '\f':
7110  m_SB.Add("\\f");
7111  break;
7112  case '\n':
7113  m_SB.Add("\\n");
7114  break;
7115  case '\r':
7116  m_SB.Add("\\r");
7117  break;
7118  case '\t':
7119  m_SB.Add("\\t");
7120  break;
7121  default:
7122  VMA_ASSERT(0 && "Character not currently supported.");
7123  break;
7124  }
7125  }
7126 }
7127 
7128 void VmaJsonWriter::ContinueString(uint32_t n)
7129 {
7130  VMA_ASSERT(m_InsideString);
7131  m_SB.AddNumber(n);
7132 }
7133 
7134 void VmaJsonWriter::ContinueString(uint64_t n)
7135 {
7136  VMA_ASSERT(m_InsideString);
7137  m_SB.AddNumber(n);
7138 }
7139 
7140 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7141 {
7142  VMA_ASSERT(m_InsideString);
7143  m_SB.AddPointer(ptr);
7144 }
7145 
7146 void VmaJsonWriter::EndString(const char* pStr)
7147 {
7148  VMA_ASSERT(m_InsideString);
7149  if(pStr != VMA_NULL && pStr[0] != '\0')
7150  {
7151  ContinueString(pStr);
7152  }
7153  m_SB.Add('"');
7154  m_InsideString = false;
7155 }
7156 
7157 void VmaJsonWriter::WriteNumber(uint32_t n)
7158 {
7159  VMA_ASSERT(!m_InsideString);
7160  BeginValue(false);
7161  m_SB.AddNumber(n);
7162 }
7163 
7164 void VmaJsonWriter::WriteNumber(uint64_t n)
7165 {
7166  VMA_ASSERT(!m_InsideString);
7167  BeginValue(false);
7168  m_SB.AddNumber(n);
7169 }
7170 
7171 void VmaJsonWriter::WriteBool(bool b)
7172 {
7173  VMA_ASSERT(!m_InsideString);
7174  BeginValue(false);
7175  m_SB.Add(b ? "true" : "false");
7176 }
7177 
7178 void VmaJsonWriter::WriteNull()
7179 {
7180  VMA_ASSERT(!m_InsideString);
7181  BeginValue(false);
7182  m_SB.Add("null");
7183 }
7184 
7185 void VmaJsonWriter::BeginValue(bool isString)
7186 {
7187  if(!m_Stack.empty())
7188  {
7189  StackItem& currItem = m_Stack.back();
7190  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7191  currItem.valueCount % 2 == 0)
7192  {
7193  VMA_ASSERT(isString);
7194  }
7195 
7196  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7197  currItem.valueCount % 2 != 0)
7198  {
7199  m_SB.Add(": ");
7200  }
7201  else if(currItem.valueCount > 0)
7202  {
7203  m_SB.Add(", ");
7204  WriteIndent();
7205  }
7206  else
7207  {
7208  WriteIndent();
7209  }
7210  ++currItem.valueCount;
7211  }
7212 }
7213 
7214 void VmaJsonWriter::WriteIndent(bool oneLess)
7215 {
7216  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7217  {
7218  m_SB.AddNewLine();
7219 
7220  size_t count = m_Stack.size();
7221  if(count > 0 && oneLess)
7222  {
7223  --count;
7224  }
7225  for(size_t i = 0; i < count; ++i)
7226  {
7227  m_SB.Add(INDENT);
7228  }
7229  }
7230 }
7231 
7232 #endif // #if VMA_STATS_STRING_ENABLED
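// Illustrative sketch (not part of the original source): inside an object,
// string keys and values must alternate - BeginValue() above asserts that
// every even-positioned entry of an object is a string (the key). Variables
// are hypothetical.
//
//   VmaStringBuilder sb(hAllocator);
//   VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//   json.BeginObject();
//   json.WriteString("Count");     // key
//   json.WriteNumber(42u);         // value
//   json.WriteString("Items");     // key
//   json.BeginArray(true);         // single-line array as the value
//   json.WriteNumber(1u);
//   json.WriteNumber(2u);
//   json.EndArray();
//   json.EndObject();              // ~VmaJsonWriter asserts the stack is empty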
7233 
7234 ////////////////////////////////////////////////////////////////////////////////
7235 
7236 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7237 {
7238  if(IsUserDataString())
7239  {
7240  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7241 
7242  FreeUserDataString(hAllocator);
7243 
7244  if(pUserData != VMA_NULL)
7245  {
7246  const char* const newStrSrc = (char*)pUserData;
7247  const size_t newStrLen = strlen(newStrSrc);
7248  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7249  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7250  m_pUserData = newStrDst;
7251  }
7252  }
7253  else
7254  {
7255  m_pUserData = pUserData;
7256  }
7257 }
7258 
7259 void VmaAllocation_T::ChangeBlockAllocation(
7260  VmaAllocator hAllocator,
7261  VmaDeviceMemoryBlock* block,
7262  VkDeviceSize offset)
7263 {
7264  VMA_ASSERT(block != VMA_NULL);
7265  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7266 
7267  // Move mapping reference counter from old block to new block.
7268  if(block != m_BlockAllocation.m_Block)
7269  {
7270  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7271  if(IsPersistentMap())
7272  ++mapRefCount;
7273  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7274  block->Map(hAllocator, mapRefCount, VMA_NULL);
7275  }
7276 
7277  m_BlockAllocation.m_Block = block;
7278  m_BlockAllocation.m_Offset = offset;
7279 }
7280 
7281 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7282 {
7283  VMA_ASSERT(newSize > 0);
7284  m_Size = newSize;
7285 }
7286 
7287 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7288 {
7289  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7290  m_BlockAllocation.m_Offset = newOffset;
7291 }
7292 
7293 VkDeviceSize VmaAllocation_T::GetOffset() const
7294 {
7295  switch(m_Type)
7296  {
7297  case ALLOCATION_TYPE_BLOCK:
7298  return m_BlockAllocation.m_Offset;
7299  case ALLOCATION_TYPE_DEDICATED:
7300  return 0;
7301  default:
7302  VMA_ASSERT(0);
7303  return 0;
7304  }
7305 }
7306 
7307 VkDeviceMemory VmaAllocation_T::GetMemory() const
7308 {
7309  switch(m_Type)
7310  {
7311  case ALLOCATION_TYPE_BLOCK:
7312  return m_BlockAllocation.m_Block->GetDeviceMemory();
7313  case ALLOCATION_TYPE_DEDICATED:
7314  return m_DedicatedAllocation.m_hMemory;
7315  default:
7316  VMA_ASSERT(0);
7317  return VK_NULL_HANDLE;
7318  }
7319 }
7320 
7321 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7322 {
7323  switch(m_Type)
7324  {
7325  case ALLOCATION_TYPE_BLOCK:
7326  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7327  case ALLOCATION_TYPE_DEDICATED:
7328  return m_DedicatedAllocation.m_MemoryTypeIndex;
7329  default:
7330  VMA_ASSERT(0);
7331  return UINT32_MAX;
7332  }
7333 }
7334 
7335 void* VmaAllocation_T::GetMappedData() const
7336 {
7337  switch(m_Type)
7338  {
7339  case ALLOCATION_TYPE_BLOCK:
7340  if(m_MapCount != 0)
7341  {
7342  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7343  VMA_ASSERT(pBlockData != VMA_NULL);
7344  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7345  }
7346  else
7347  {
7348  return VMA_NULL;
7349  }
7350  break;
7351  case ALLOCATION_TYPE_DEDICATED:
7352  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7353  return m_DedicatedAllocation.m_pMappedData;
7354  default:
7355  VMA_ASSERT(0);
7356  return VMA_NULL;
7357  }
7358 }
7359 
7360 bool VmaAllocation_T::CanBecomeLost() const
7361 {
7362  switch(m_Type)
7363  {
7364  case ALLOCATION_TYPE_BLOCK:
7365  return m_BlockAllocation.m_CanBecomeLost;
7366  case ALLOCATION_TYPE_DEDICATED:
7367  return false;
7368  default:
7369  VMA_ASSERT(0);
7370  return false;
7371  }
7372 }
7373 
7374 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7375 {
7376  VMA_ASSERT(CanBecomeLost());
7377 
7378  /*
7379  Warning: This is a carefully designed algorithm.
7380  Do not modify unless you really know what you're doing :)
7381  */
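 // The loop below is a lock-free compare-and-swap retry: keep re-reading the
 // atomic last-use frame index until the allocation turns out to be already
 // lost, still in use within the last frameInUseCount frames, or the exchange
 // to VMA_FRAME_INDEX_LOST succeeds. A concurrent writer only costs one more
 // iteration.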
7382  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7383  for(;;)
7384  {
7385  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7386  {
7387  VMA_ASSERT(0);
7388  return false;
7389  }
7390  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7391  {
7392  return false;
7393  }
7394  else // Last use time earlier than current time.
7395  {
7396  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7397  {
7398  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7399  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7400  return true;
7401  }
7402  }
7403  }
7404 }
7405 
7406 #if VMA_STATS_STRING_ENABLED
7407 
7408 // Correspond to values of enum VmaSuballocationType.
7409 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7410  "FREE",
7411  "UNKNOWN",
7412  "BUFFER",
7413  "IMAGE_UNKNOWN",
7414  "IMAGE_LINEAR",
7415  "IMAGE_OPTIMAL",
7416 };
7417 
7418 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7419 {
7420  json.WriteString("Type");
7421  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7422 
7423  json.WriteString("Size");
7424  json.WriteNumber(m_Size);
7425 
7426  if(m_pUserData != VMA_NULL)
7427  {
7428  json.WriteString("UserData");
7429  if(IsUserDataString())
7430  {
7431  json.WriteString((const char*)m_pUserData);
7432  }
7433  else
7434  {
7435  json.BeginString();
7436  json.ContinueString_Pointer(m_pUserData);
7437  json.EndString();
7438  }
7439  }
7440 
7441  json.WriteString("CreationFrameIndex");
7442  json.WriteNumber(m_CreationFrameIndex);
7443 
7444  json.WriteString("LastUseFrameIndex");
7445  json.WriteNumber(GetLastUseFrameIndex());
7446 
7447  if(m_BufferImageUsage != 0)
7448  {
7449  json.WriteString("Usage");
7450  json.WriteNumber(m_BufferImageUsage);
7451  }
7452 }
7453 
7454 #endif
7455 
7456 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7457 {
7458  VMA_ASSERT(IsUserDataString());
7459  if(m_pUserData != VMA_NULL)
7460  {
7461  char* const oldStr = (char*)m_pUserData;
7462  const size_t oldStrLen = strlen(oldStr);
7463  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7464  m_pUserData = VMA_NULL;
7465  }
7466 }
7467 
7468 void VmaAllocation_T::BlockAllocMap()
7469 {
7470  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7471 
7472  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7473  {
7474  ++m_MapCount;
7475  }
7476  else
7477  {
7478  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7479  }
7480 }
7481 
7482 void VmaAllocation_T::BlockAllocUnmap()
7483 {
7484  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7485 
7486  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7487  {
7488  --m_MapCount;
7489  }
7490  else
7491  {
7492  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7493  }
7494 }
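// Note: m_MapCount packs two pieces of state - the low bits count outstanding
// map calls (capped at 0x7F above), while the MAP_COUNT_FLAG_PERSISTENT_MAP
// bit marks allocations created persistently mapped, which is why that flag
// is masked out whenever the counter is inspected.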
7495 
7496 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7497 {
7498  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7499 
7500  if(m_MapCount != 0)
7501  {
7502  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7503  {
7504  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7505  *ppData = m_DedicatedAllocation.m_pMappedData;
7506  ++m_MapCount;
7507  return VK_SUCCESS;
7508  }
7509  else
7510  {
7511  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7512  return VK_ERROR_MEMORY_MAP_FAILED;
7513  }
7514  }
7515  else
7516  {
7517  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7518  hAllocator->m_hDevice,
7519  m_DedicatedAllocation.m_hMemory,
7520  0, // offset
7521  VK_WHOLE_SIZE,
7522  0, // flags
7523  ppData);
7524  if(result == VK_SUCCESS)
7525  {
7526  m_DedicatedAllocation.m_pMappedData = *ppData;
7527  m_MapCount = 1;
7528  }
7529  return result;
7530  }
7531 }
7532 
7533 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7534 {
7535  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7536 
7537  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7538  {
7539  --m_MapCount;
7540  if(m_MapCount == 0)
7541  {
7542  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7543  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7544  hAllocator->m_hDevice,
7545  m_DedicatedAllocation.m_hMemory);
7546  }
7547  }
7548  else
7549  {
7550  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7551  }
7552 }
7553 
7554 #if VMA_STATS_STRING_ENABLED
7555 
7556 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7557 {
7558  json.BeginObject();
7559 
7560  json.WriteString("Blocks");
7561  json.WriteNumber(stat.blockCount);
7562 
7563  json.WriteString("Allocations");
7564  json.WriteNumber(stat.allocationCount);
7565 
7566  json.WriteString("UnusedRanges");
7567  json.WriteNumber(stat.unusedRangeCount);
7568 
7569  json.WriteString("UsedBytes");
7570  json.WriteNumber(stat.usedBytes);
7571 
7572  json.WriteString("UnusedBytes");
7573  json.WriteNumber(stat.unusedBytes);
7574 
7575  if(stat.allocationCount > 1)
7576  {
7577  json.WriteString("AllocationSize");
7578  json.BeginObject(true);
7579  json.WriteString("Min");
7580  json.WriteNumber(stat.allocationSizeMin);
7581  json.WriteString("Avg");
7582  json.WriteNumber(stat.allocationSizeAvg);
7583  json.WriteString("Max");
7584  json.WriteNumber(stat.allocationSizeMax);
7585  json.EndObject();
7586  }
7587 
7588  if(stat.unusedRangeCount > 1)
7589  {
7590  json.WriteString("UnusedRangeSize");
7591  json.BeginObject(true);
7592  json.WriteString("Min");
7593  json.WriteNumber(stat.unusedRangeSizeMin);
7594  json.WriteString("Avg");
7595  json.WriteNumber(stat.unusedRangeSizeAvg);
7596  json.WriteString("Max");
7597  json.WriteNumber(stat.unusedRangeSizeMax);
7598  json.EndObject();
7599  }
7600 
7601  json.EndObject();
7602 }
7603 
7604 #endif // #if VMA_STATS_STRING_ENABLED
7605 
7606 struct VmaSuballocationItemSizeLess
7607 {
7608  bool operator()(
7609  const VmaSuballocationList::iterator lhs,
7610  const VmaSuballocationList::iterator rhs) const
7611  {
7612  return lhs->size < rhs->size;
7613  }
7614  bool operator()(
7615  const VmaSuballocationList::iterator lhs,
7616  VkDeviceSize rhsSize) const
7617  {
7618  return lhs->size < rhsSize;
7619  }
7620 };
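// Note: the VkDeviceSize overload above makes this comparator heterogeneous,
// so the same functor both keeps m_FreeSuballocationsBySize sorted and serves
// lower_bound-style lookups by raw size - see its use with
// VmaBinaryFindFirstNotLess in VmaBlockMetadata_Generic::CreateAllocationRequest.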
7621 
7622 
7623 ////////////////////////////////////////////////////////////////////////////////
7624 // class VmaBlockMetadata
7625 
7626 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7627  m_Size(0),
7628  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7629 {
7630 }
7631 
7632 #if VMA_STATS_STRING_ENABLED
7633 
7634 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7635  VkDeviceSize unusedBytes,
7636  size_t allocationCount,
7637  size_t unusedRangeCount) const
7638 {
7639  json.BeginObject();
7640 
7641  json.WriteString("TotalBytes");
7642  json.WriteNumber(GetSize());
7643 
7644  json.WriteString("UnusedBytes");
7645  json.WriteNumber(unusedBytes);
7646 
7647  json.WriteString("Allocations");
7648  json.WriteNumber((uint64_t)allocationCount);
7649 
7650  json.WriteString("UnusedRanges");
7651  json.WriteNumber((uint64_t)unusedRangeCount);
7652 
7653  json.WriteString("Suballocations");
7654  json.BeginArray();
7655 }
7656 
7657 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7658  VkDeviceSize offset,
7659  VmaAllocation hAllocation) const
7660 {
7661  json.BeginObject(true);
7662 
7663  json.WriteString("Offset");
7664  json.WriteNumber(offset);
7665 
7666  hAllocation->PrintParameters(json);
7667 
7668  json.EndObject();
7669 }
7670 
7671 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7672  VkDeviceSize offset,
7673  VkDeviceSize size) const
7674 {
7675  json.BeginObject(true);
7676 
7677  json.WriteString("Offset");
7678  json.WriteNumber(offset);
7679 
7680  json.WriteString("Type");
7681  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7682 
7683  json.WriteString("Size");
7684  json.WriteNumber(size);
7685 
7686  json.EndObject();
7687 }
7688 
7689 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7690 {
7691  json.EndArray();
7692  json.EndObject();
7693 }
7694 
7695 #endif // #if VMA_STATS_STRING_ENABLED
7696 
7697 ////////////////////////////////////////////////////////////////////////////////
7698 // class VmaBlockMetadata_Generic
7699 
7700 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7701  VmaBlockMetadata(hAllocator),
7702  m_FreeCount(0),
7703  m_SumFreeSize(0),
7704  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7705  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7706 {
7707 }
7708 
7709 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7710 {
7711 }
7712 
7713 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7714 {
7715  VmaBlockMetadata::Init(size);
7716 
7717  m_FreeCount = 1;
7718  m_SumFreeSize = size;
7719 
7720  VmaSuballocation suballoc = {};
7721  suballoc.offset = 0;
7722  suballoc.size = size;
7723  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7724  suballoc.hAllocation = VK_NULL_HANDLE;
7725 
7726  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7727  m_Suballocations.push_back(suballoc);
7728  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7729  --suballocItem;
7730  m_FreeSuballocationsBySize.push_back(suballocItem);
7731 }
7732 
7733 bool VmaBlockMetadata_Generic::Validate() const
7734 {
7735  VMA_VALIDATE(!m_Suballocations.empty());
7736 
7737  // Expected offset of new suballocation as calculated from previous ones.
7738  VkDeviceSize calculatedOffset = 0;
7739  // Expected number of free suballocations as calculated from traversing their list.
7740  uint32_t calculatedFreeCount = 0;
7741  // Expected sum size of free suballocations as calculated from traversing their list.
7742  VkDeviceSize calculatedSumFreeSize = 0;
7743  // Expected number of free suballocations that should be registered in
7744  // m_FreeSuballocationsBySize calculated from traversing their list.
7745  size_t freeSuballocationsToRegister = 0;
7746  // True if previous visited suballocation was free.
7747  bool prevFree = false;
7748 
7749  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7750  suballocItem != m_Suballocations.cend();
7751  ++suballocItem)
7752  {
7753  const VmaSuballocation& subAlloc = *suballocItem;
7754 
7755  // Actual offset of this suballocation doesn't match expected one.
7756  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7757 
7758  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7759  // Two adjacent free suballocations are invalid. They should be merged.
7760  VMA_VALIDATE(!prevFree || !currFree);
7761 
7762  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7763 
7764  if(currFree)
7765  {
7766  calculatedSumFreeSize += subAlloc.size;
7767  ++calculatedFreeCount;
7768  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7769  {
7770  ++freeSuballocationsToRegister;
7771  }
7772 
7773  // Margin required between allocations - every free space must be at least that large.
7774  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7775  }
7776  else
7777  {
7778  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7779  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7780 
7781  // Margin required between allocations - previous allocation must be free.
7782  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7783  }
7784 
7785  calculatedOffset += subAlloc.size;
7786  prevFree = currFree;
7787  }
7788 
7789  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7790  // match expected one.
7791  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7792 
7793  VkDeviceSize lastSize = 0;
7794  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7795  {
7796  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7797 
7798  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7799  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7800  // They must be sorted by size ascending.
7801  VMA_VALIDATE(suballocItem->size >= lastSize);
7802 
7803  lastSize = suballocItem->size;
7804  }
7805 
7806  // Check if totals match calculated values.
7807  VMA_VALIDATE(ValidateFreeSuballocationList());
7808  VMA_VALIDATE(calculatedOffset == GetSize());
7809  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7810  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7811 
7812  return true;
7813 }
7814 
7815 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7816 {
7817  if(!m_FreeSuballocationsBySize.empty())
7818  {
7819  return m_FreeSuballocationsBySize.back()->size;
7820  }
7821  else
7822  {
7823  return 0;
7824  }
7825 }
7826 
7827 bool VmaBlockMetadata_Generic::IsEmpty() const
7828 {
7829  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7830 }
7831 
7832 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7833 {
7834  outInfo.blockCount = 1;
7835 
7836  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7837  outInfo.allocationCount = rangeCount - m_FreeCount;
7838  outInfo.unusedRangeCount = m_FreeCount;
7839 
7840  outInfo.unusedBytes = m_SumFreeSize;
7841  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7842 
7843  outInfo.allocationSizeMin = UINT64_MAX;
7844  outInfo.allocationSizeMax = 0;
7845  outInfo.unusedRangeSizeMin = UINT64_MAX;
7846  outInfo.unusedRangeSizeMax = 0;
7847 
7848  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7849  suballocItem != m_Suballocations.cend();
7850  ++suballocItem)
7851  {
7852  const VmaSuballocation& suballoc = *suballocItem;
7853  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7854  {
7855  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7856  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7857  }
7858  else
7859  {
7860  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7861  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7862  }
7863  }
7864 }
7865 
7866 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7867 {
7868  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7869 
7870  inoutStats.size += GetSize();
7871  inoutStats.unusedSize += m_SumFreeSize;
7872  inoutStats.allocationCount += rangeCount - m_FreeCount;
7873  inoutStats.unusedRangeCount += m_FreeCount;
7874  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7875 }
7876 
7877 #if VMA_STATS_STRING_ENABLED
7878 
7879 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7880 {
7881  PrintDetailedMap_Begin(json,
7882  m_SumFreeSize, // unusedBytes
7883  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7884  m_FreeCount); // unusedRangeCount
7885 
7886  size_t i = 0;
7887  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7888  suballocItem != m_Suballocations.cend();
7889  ++suballocItem, ++i)
7890  {
7891  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7892  {
7893  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7894  }
7895  else
7896  {
7897  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7898  }
7899  }
7900 
7901  PrintDetailedMap_End(json);
7902 }
7903 
7904 #endif // #if VMA_STATS_STRING_ENABLED
7905 
7906 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7907  uint32_t currentFrameIndex,
7908  uint32_t frameInUseCount,
7909  VkDeviceSize bufferImageGranularity,
7910  VkDeviceSize allocSize,
7911  VkDeviceSize allocAlignment,
7912  bool upperAddress,
7913  VmaSuballocationType allocType,
7914  bool canMakeOtherLost,
7915  uint32_t strategy,
7916  VmaAllocationRequest* pAllocationRequest)
7917 {
7918  VMA_ASSERT(allocSize > 0);
7919  VMA_ASSERT(!upperAddress);
7920  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7921  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7922  VMA_HEAVY_ASSERT(Validate());
7923 
7924  pAllocationRequest->type = VmaAllocationRequestType::Normal;
7925 
7926  // There is not enough total free space in this block to fulfill the request: Early return.
7927  if(canMakeOtherLost == false &&
7928  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7929  {
7930  return false;
7931  }
7932 
7933  // New algorithm, efficiently searching freeSuballocationsBySize.
7934  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7935  if(freeSuballocCount > 0)
7936  {
7937  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7938  {
7939  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7940  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7941  m_FreeSuballocationsBySize.data(),
7942  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7943  allocSize + 2 * VMA_DEBUG_MARGIN,
7944  VmaSuballocationItemSizeLess());
7945  size_t index = it - m_FreeSuballocationsBySize.data();
7946  for(; index < freeSuballocCount; ++index)
7947  {
7948  if(CheckAllocation(
7949  currentFrameIndex,
7950  frameInUseCount,
7951  bufferImageGranularity,
7952  allocSize,
7953  allocAlignment,
7954  allocType,
7955  m_FreeSuballocationsBySize[index],
7956  false, // canMakeOtherLost
7957  &pAllocationRequest->offset,
7958  &pAllocationRequest->itemsToMakeLostCount,
7959  &pAllocationRequest->sumFreeSize,
7960  &pAllocationRequest->sumItemSize))
7961  {
7962  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7963  return true;
7964  }
7965  }
7966  }
7967  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7968  {
7969  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7970  it != m_Suballocations.end();
7971  ++it)
7972  {
7973  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7974  currentFrameIndex,
7975  frameInUseCount,
7976  bufferImageGranularity,
7977  allocSize,
7978  allocAlignment,
7979  allocType,
7980  it,
7981  false, // canMakeOtherLost
7982  &pAllocationRequest->offset,
7983  &pAllocationRequest->itemsToMakeLostCount,
7984  &pAllocationRequest->sumFreeSize,
7985  &pAllocationRequest->sumItemSize))
7986  {
7987  pAllocationRequest->item = it;
7988  return true;
7989  }
7990  }
7991  }
7992  else // WORST_FIT, FIRST_FIT
7993  {
7994  // Search starting from biggest suballocations.
7995  for(size_t index = freeSuballocCount; index--; )
7996  {
7997  if(CheckAllocation(
7998  currentFrameIndex,
7999  frameInUseCount,
8000  bufferImageGranularity,
8001  allocSize,
8002  allocAlignment,
8003  allocType,
8004  m_FreeSuballocationsBySize[index],
8005  false, // canMakeOtherLost
8006  &pAllocationRequest->offset,
8007  &pAllocationRequest->itemsToMakeLostCount,
8008  &pAllocationRequest->sumFreeSize,
8009  &pAllocationRequest->sumItemSize))
8010  {
8011  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8012  return true;
8013  }
8014  }
8015  }
8016  }
8017 
8018  if(canMakeOtherLost)
8019  {
8020  // Brute-force algorithm. TODO: Come up with something better.
8021 
8022  bool found = false;
8023  VmaAllocationRequest tmpAllocRequest = {};
8024  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8025  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8026  suballocIt != m_Suballocations.end();
8027  ++suballocIt)
8028  {
8029  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8030  suballocIt->hAllocation->CanBecomeLost())
8031  {
8032  if(CheckAllocation(
8033  currentFrameIndex,
8034  frameInUseCount,
8035  bufferImageGranularity,
8036  allocSize,
8037  allocAlignment,
8038  allocType,
8039  suballocIt,
8040  canMakeOtherLost,
8041  &tmpAllocRequest.offset,
8042  &tmpAllocRequest.itemsToMakeLostCount,
8043  &tmpAllocRequest.sumFreeSize,
8044  &tmpAllocRequest.sumItemSize))
8045  {
8046  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8047  {
8048  *pAllocationRequest = tmpAllocRequest;
8049  pAllocationRequest->item = suballocIt;
8050  break;
8051  }
8052  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8053  {
8054  *pAllocationRequest = tmpAllocRequest;
8055  pAllocationRequest->item = suballocIt;
8056  found = true;
8057  }
8058  }
8059  }
8060  }
8061 
8062  return found;
8063  }
8064 
8065  return false;
8066 }
8067 
8068 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8069  uint32_t currentFrameIndex,
8070  uint32_t frameInUseCount,
8071  VmaAllocationRequest* pAllocationRequest)
8072 {
8073  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8074 
8075  while(pAllocationRequest->itemsToMakeLostCount > 0)
8076  {
8077  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8078  {
8079  ++pAllocationRequest->item;
8080  }
8081  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8082  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8083  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8084  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8085  {
8086  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8087  --pAllocationRequest->itemsToMakeLostCount;
8088  }
8089  else
8090  {
8091  return false;
8092  }
8093  }
8094 
8095  VMA_HEAVY_ASSERT(Validate());
8096  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8097  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8098 
8099  return true;
8100 }
8101 
8102 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8103 {
8104  uint32_t lostAllocationCount = 0;
8105  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8106  it != m_Suballocations.end();
8107  ++it)
8108  {
8109  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8110  it->hAllocation->CanBecomeLost() &&
8111  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8112  {
8113  it = FreeSuballocation(it);
8114  ++lostAllocationCount;
8115  }
8116  }
8117  return lostAllocationCount;
8118 }
8119 
8120 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8121 {
8122  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8123  it != m_Suballocations.end();
8124  ++it)
8125  {
8126  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8127  {
8128  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8129  {
8130  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8131  return VK_ERROR_VALIDATION_FAILED_EXT;
8132  }
8133  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8134  {
8135  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8136  return VK_ERROR_VALIDATION_FAILED_EXT;
8137  }
8138  }
8139  }
8140 
8141  return VK_SUCCESS;
8142 }
8143 
8144 void VmaBlockMetadata_Generic::Alloc(
8145  const VmaAllocationRequest& request,
8146  VmaSuballocationType type,
8147  VkDeviceSize allocSize,
8148  VmaAllocation hAllocation)
8149 {
8150  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8151  VMA_ASSERT(request.item != m_Suballocations.end());
8152  VmaSuballocation& suballoc = *request.item;
8153  // Given suballocation is a free block.
8154  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8155  // Given offset is inside this suballocation.
8156  VMA_ASSERT(request.offset >= suballoc.offset);
8157  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8158  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8159  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8160 
8161  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8162  // it to become used.
8163  UnregisterFreeSuballocation(request.item);
8164 
8165  suballoc.offset = request.offset;
8166  suballoc.size = allocSize;
8167  suballoc.type = type;
8168  suballoc.hAllocation = hAllocation;
8169 
8170  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8171  if(paddingEnd)
8172  {
8173  VmaSuballocation paddingSuballoc = {};
8174  paddingSuballoc.offset = request.offset + allocSize;
8175  paddingSuballoc.size = paddingEnd;
8176  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8177  VmaSuballocationList::iterator next = request.item;
8178  ++next;
8179  const VmaSuballocationList::iterator paddingEndItem =
8180  m_Suballocations.insert(next, paddingSuballoc);
8181  RegisterFreeSuballocation(paddingEndItem);
8182  }
8183 
8184  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8185  if(paddingBegin)
8186  {
8187  VmaSuballocation paddingSuballoc = {};
8188  paddingSuballoc.offset = request.offset - paddingBegin;
8189  paddingSuballoc.size = paddingBegin;
8190  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8191  const VmaSuballocationList::iterator paddingBeginItem =
8192  m_Suballocations.insert(request.item, paddingSuballoc);
8193  RegisterFreeSuballocation(paddingBeginItem);
8194  }
8195 
8196  // Update totals.
8197  m_FreeCount = m_FreeCount - 1;
8198  if(paddingBegin > 0)
8199  {
8200  ++m_FreeCount;
8201  }
8202  if(paddingEnd > 0)
8203  {
8204  ++m_FreeCount;
8205  }
8206  m_SumFreeSize -= allocSize;
8207 }
8208 
8209 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8210 {
8211  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8212  suballocItem != m_Suballocations.end();
8213  ++suballocItem)
8214  {
8215  VmaSuballocation& suballoc = *suballocItem;
8216  if(suballoc.hAllocation == allocation)
8217  {
8218  FreeSuballocation(suballocItem);
8219  VMA_HEAVY_ASSERT(Validate());
8220  return;
8221  }
8222  }
8223  VMA_ASSERT(0 && "Not found!");
8224 }
8225 
8226 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8227 {
8228  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8229  suballocItem != m_Suballocations.end();
8230  ++suballocItem)
8231  {
8232  VmaSuballocation& suballoc = *suballocItem;
8233  if(suballoc.offset == offset)
8234  {
8235  FreeSuballocation(suballocItem);
8236  return;
8237  }
8238  }
8239  VMA_ASSERT(0 && "Not found!");
8240 }
8241 
8242 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8243 {
8244  typedef VmaSuballocationList::iterator iter_type;
8245  for(iter_type suballocItem = m_Suballocations.begin();
8246  suballocItem != m_Suballocations.end();
8247  ++suballocItem)
8248  {
8249  VmaSuballocation& suballoc = *suballocItem;
8250  if(suballoc.hAllocation == alloc)
8251  {
8252  iter_type nextItem = suballocItem;
8253  ++nextItem;
8254 
8255  // Should have been ensured at a higher level.
8256  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8257 
8258  // Shrinking.
8259  if(newSize < alloc->GetSize())
8260  {
8261  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8262 
8263  // There is next item.
8264  if(nextItem != m_Suballocations.end())
8265  {
8266  // Next item is free.
8267  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8268  {
8269  // Grow this next item backward.
8270  UnregisterFreeSuballocation(nextItem);
8271  nextItem->offset -= sizeDiff;
8272  nextItem->size += sizeDiff;
8273  RegisterFreeSuballocation(nextItem);
8274  }
8275  // Next item is not free.
8276  else
8277  {
8278  // Create free item after current one.
8279  VmaSuballocation newFreeSuballoc;
8280  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8281  newFreeSuballoc.offset = suballoc.offset + newSize;
8282  newFreeSuballoc.size = sizeDiff;
8283  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8284  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8285  RegisterFreeSuballocation(newFreeSuballocIt);
8286 
8287  ++m_FreeCount;
8288  }
8289  }
8290  // This is the last item.
8291  else
8292  {
8293  // Create free item at the end.
8294  VmaSuballocation newFreeSuballoc;
8295  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8296  newFreeSuballoc.offset = suballoc.offset + newSize;
8297  newFreeSuballoc.size = sizeDiff;
8298  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8299  m_Suballocations.push_back(newFreeSuballoc);
8300 
8301  iter_type newFreeSuballocIt = m_Suballocations.end();
8302  RegisterFreeSuballocation(--newFreeSuballocIt);
8303 
8304  ++m_FreeCount;
8305  }
8306 
8307  suballoc.size = newSize;
8308  m_SumFreeSize += sizeDiff;
8309  }
8310  // Growing.
8311  else
8312  {
8313  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8314 
8315  // There is next item.
8316  if(nextItem != m_Suballocations.end())
8317  {
8318  // Next item is free.
8319  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8320  {
8321  // There is not enough free space, including margin.
8322  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8323  {
8324  return false;
8325  }
8326 
8327  // There is more free space than required.
8328  if(nextItem->size > sizeDiff)
8329  {
8330  // Move and shrink this next item.
8331  UnregisterFreeSuballocation(nextItem);
8332  nextItem->offset += sizeDiff;
8333  nextItem->size -= sizeDiff;
8334  RegisterFreeSuballocation(nextItem);
8335  }
8336  // There is exactly the amount of free space required.
8337  else
8338  {
8339  // Remove this next free item.
8340  UnregisterFreeSuballocation(nextItem);
8341  m_Suballocations.erase(nextItem);
8342  --m_FreeCount;
8343  }
8344  }
8345  // Next item is not free - there is no space to grow.
8346  else
8347  {
8348  return false;
8349  }
8350  }
8351  // This is the last item - there is no space to grow.
8352  else
8353  {
8354  return false;
8355  }
8356 
8357  suballoc.size = newSize;
8358  m_SumFreeSize -= sizeDiff;
8359  }
8360 
8361  // We cannot call Validate() here because alloc object is updated to new size outside of this call.
8362  return true;
8363  }
8364  }
8365  VMA_ASSERT(0 && "Not found!");
8366  return false;
8367 }
8368 
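// Checks invariants of m_FreeSuballocationsBySize: every registered item is
// free, at least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes large, and
// the vector is sorted by size in non-decreasing order.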
8369 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8370 {
8371  VkDeviceSize lastSize = 0;
8372  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8373  {
8374  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8375 
8376  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8377  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8378  VMA_VALIDATE(it->size >= lastSize);
8379  lastSize = it->size;
8380  }
8381  return true;
8382 }
8383 
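// Checks whether an allocation with the given size and alignment can be
// placed starting at suballocItem. Applies VMA_DEBUG_MARGIN and alignment,
// widens the alignment when a bufferImageGranularity conflict with a
// neighboring suballocation is detected and, if canMakeOtherLost is true,
// counts the allocations that would have to be made lost for the request to
// fit. On success, *pOffset receives the final offset.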
8384 bool VmaBlockMetadata_Generic::CheckAllocation(
8385  uint32_t currentFrameIndex,
8386  uint32_t frameInUseCount,
8387  VkDeviceSize bufferImageGranularity,
8388  VkDeviceSize allocSize,
8389  VkDeviceSize allocAlignment,
8390  VmaSuballocationType allocType,
8391  VmaSuballocationList::const_iterator suballocItem,
8392  bool canMakeOtherLost,
8393  VkDeviceSize* pOffset,
8394  size_t* itemsToMakeLostCount,
8395  VkDeviceSize* pSumFreeSize,
8396  VkDeviceSize* pSumItemSize) const
8397 {
8398  VMA_ASSERT(allocSize > 0);
8399  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8400  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8401  VMA_ASSERT(pOffset != VMA_NULL);
8402 
8403  *itemsToMakeLostCount = 0;
8404  *pSumFreeSize = 0;
8405  *pSumItemSize = 0;
8406 
8407  if(canMakeOtherLost)
8408  {
8409  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8410  {
8411  *pSumFreeSize = suballocItem->size;
8412  }
8413  else
8414  {
8415  if(suballocItem->hAllocation->CanBecomeLost() &&
8416  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8417  {
8418  ++*itemsToMakeLostCount;
8419  *pSumItemSize = suballocItem->size;
8420  }
8421  else
8422  {
8423  return false;
8424  }
8425  }
8426 
8427  // Remaining size is too small for this request: Early return.
8428  if(GetSize() - suballocItem->offset < allocSize)
8429  {
8430  return false;
8431  }
8432 
8433  // Start from offset equal to beginning of this suballocation.
8434  *pOffset = suballocItem->offset;
8435 
8436  // Apply VMA_DEBUG_MARGIN at the beginning.
8437  if(VMA_DEBUG_MARGIN > 0)
8438  {
8439  *pOffset += VMA_DEBUG_MARGIN;
8440  }
8441 
8442  // Apply alignment.
8443  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8444 
8445  // Check previous suballocations for BufferImageGranularity conflicts.
8446  // Make bigger alignment if necessary.
8447  if(bufferImageGranularity > 1)
8448  {
8449  bool bufferImageGranularityConflict = false;
8450  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8451  while(prevSuballocItem != m_Suballocations.cbegin())
8452  {
8453  --prevSuballocItem;
8454  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8455  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8456  {
8457  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8458  {
8459  bufferImageGranularityConflict = true;
8460  break;
8461  }
8462  }
8463  else
8464  // Already on previous page.
8465  break;
8466  }
8467  if(bufferImageGranularityConflict)
8468  {
8469  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8470  }
8471  }
8472 
8473  // Now that we have final *pOffset, check if we are past suballocItem.
8474  // If yes, return false - this function should be called for another suballocItem as starting point.
8475  if(*pOffset >= suballocItem->offset + suballocItem->size)
8476  {
8477  return false;
8478  }
8479 
8480  // Calculate padding at the beginning based on current offset.
8481  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8482 
8483  // Calculate required margin at the end.
8484  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8485 
8486  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8487  // Another early return check.
8488  if(suballocItem->offset + totalSize > GetSize())
8489  {
8490  return false;
8491  }
8492 
8493  // Advance lastSuballocItem until desired size is reached.
8494  // Update itemsToMakeLostCount.
8495  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8496  if(totalSize > suballocItem->size)
8497  {
8498  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8499  while(remainingSize > 0)
8500  {
8501  ++lastSuballocItem;
8502  if(lastSuballocItem == m_Suballocations.cend())
8503  {
8504  return false;
8505  }
8506  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8507  {
8508  *pSumFreeSize += lastSuballocItem->size;
8509  }
8510  else
8511  {
8512  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8513  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8514  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8515  {
8516  ++*itemsToMakeLostCount;
8517  *pSumItemSize += lastSuballocItem->size;
8518  }
8519  else
8520  {
8521  return false;
8522  }
8523  }
8524  remainingSize = (lastSuballocItem->size < remainingSize) ?
8525  remainingSize - lastSuballocItem->size : 0;
8526  }
8527  }
8528 
8529  // Check next suballocations for BufferImageGranularity conflicts.
8530  // If conflict exists, we must mark more allocations lost or fail.
8531  if(bufferImageGranularity > 1)
8532  {
8533  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8534  ++nextSuballocItem;
8535  while(nextSuballocItem != m_Suballocations.cend())
8536  {
8537  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8538  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8539  {
8540  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8541  {
8542  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8543  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8544  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8545  {
8546  ++*itemsToMakeLostCount;
8547  }
8548  else
8549  {
8550  return false;
8551  }
8552  }
8553  }
8554  else
8555  {
8556  // Already on next page.
8557  break;
8558  }
8559  ++nextSuballocItem;
8560  }
8561  }
8562  }
8563  else
8564  {
8565  const VmaSuballocation& suballoc = *suballocItem;
8566  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8567 
8568  *pSumFreeSize = suballoc.size;
8569 
8570  // Size of this suballocation is too small for this request: Early return.
8571  if(suballoc.size < allocSize)
8572  {
8573  return false;
8574  }
8575 
8576  // Start from offset equal to beginning of this suballocation.
8577  *pOffset = suballoc.offset;
8578 
8579  // Apply VMA_DEBUG_MARGIN at the beginning.
8580  if(VMA_DEBUG_MARGIN > 0)
8581  {
8582  *pOffset += VMA_DEBUG_MARGIN;
8583  }
8584 
8585  // Apply alignment.
8586  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8587 
8588  // Check previous suballocations for BufferImageGranularity conflicts.
8589  // Make bigger alignment if necessary.
8590  if(bufferImageGranularity > 1)
8591  {
8592  bool bufferImageGranularityConflict = false;
8593  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8594  while(prevSuballocItem != m_Suballocations.cbegin())
8595  {
8596  --prevSuballocItem;
8597  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8598  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8599  {
8600  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8601  {
8602  bufferImageGranularityConflict = true;
8603  break;
8604  }
8605  }
8606  else
8607  // Already on previous page.
8608  break;
8609  }
8610  if(bufferImageGranularityConflict)
8611  {
8612  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8613  }
8614  }
8615 
8616  // Calculate padding at the beginning based on current offset.
8617  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8618 
8619  // Calculate required margin at the end.
8620  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8621 
8622  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8623  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8624  {
8625  return false;
8626  }
8627 
8628  // Check next suballocations for BufferImageGranularity conflicts.
8629  // If conflict exists, allocation cannot be made here.
8630  if(bufferImageGranularity > 1)
8631  {
8632  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8633  ++nextSuballocItem;
8634  while(nextSuballocItem != m_Suballocations.cend())
8635  {
8636  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8637  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8638  {
8639  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8640  {
8641  return false;
8642  }
8643  }
8644  else
8645  {
8646  // Already on next page.
8647  break;
8648  }
8649  ++nextSuballocItem;
8650  }
8651  }
8652  }
8653 
8654  // All tests passed: Success. pOffset is already filled.
8655  return true;
8656 }
8657 
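// Merges the free suballocation following 'item' into 'item'. Both items
// must be free; the caller is responsible for updating
// m_FreeSuballocationsBySize accordingly.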
8658 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8659 {
8660  VMA_ASSERT(item != m_Suballocations.end());
8661  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8662 
8663  VmaSuballocationList::iterator nextItem = item;
8664  ++nextItem;
8665  VMA_ASSERT(nextItem != m_Suballocations.end());
8666  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8667 
8668  item->size += nextItem->size;
8669  --m_FreeCount;
8670  m_Suballocations.erase(nextItem);
8671 }
8672 
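// Marks the given suballocation as free, merges it with the previous and/or
// next suballocation if they are free too, and returns an iterator to the
// resulting free item, re-registered in m_FreeSuballocationsBySize.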
8673 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8674 {
8675  // Change this suballocation to be marked as free.
8676  VmaSuballocation& suballoc = *suballocItem;
8677  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8678  suballoc.hAllocation = VK_NULL_HANDLE;
8679 
8680  // Update totals.
8681  ++m_FreeCount;
8682  m_SumFreeSize += suballoc.size;
8683 
8684  // Merge with previous and/or next suballocation if it's also free.
8685  bool mergeWithNext = false;
8686  bool mergeWithPrev = false;
8687 
8688  VmaSuballocationList::iterator nextItem = suballocItem;
8689  ++nextItem;
8690  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8691  {
8692  mergeWithNext = true;
8693  }
8694 
8695  VmaSuballocationList::iterator prevItem = suballocItem;
8696  if(suballocItem != m_Suballocations.begin())
8697  {
8698  --prevItem;
8699  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8700  {
8701  mergeWithPrev = true;
8702  }
8703  }
8704 
8705  if(mergeWithNext)
8706  {
8707  UnregisterFreeSuballocation(nextItem);
8708  MergeFreeWithNext(suballocItem);
8709  }
8710 
8711  if(mergeWithPrev)
8712  {
8713  UnregisterFreeSuballocation(prevItem);
8714  MergeFreeWithNext(prevItem);
8715  RegisterFreeSuballocation(prevItem);
8716  return prevItem;
8717  }
8718  else
8719  {
8720  RegisterFreeSuballocation(suballocItem);
8721  return suballocItem;
8722  }
8723 }
8724 
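// Adds a free suballocation to m_FreeSuballocationsBySize, keeping the
// vector sorted by size. Items smaller than
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are not indexed.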
8725 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8726 {
8727  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8728  VMA_ASSERT(item->size > 0);
8729 
8730  // You may want to enable this validation at the beginning or at the end of
8731  // this function, depending on what you want to check.
8732  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8733 
8734  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8735  {
8736  if(m_FreeSuballocationsBySize.empty())
8737  {
8738  m_FreeSuballocationsBySize.push_back(item);
8739  }
8740  else
8741  {
8742  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8743  }
8744  }
8745 
8746  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8747 }
8748 
8749 
8750 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8751 {
8752  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8753  VMA_ASSERT(item->size > 0);
8754 
8755  // You may want to enable this validation at the beginning or at the end of
8756  // this function, depending on what you want to check.
8757  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8758 
8759  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8760  {
8761  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8762  m_FreeSuballocationsBySize.data(),
8763  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8764  item,
8765  VmaSuballocationItemSizeLess());
8766  for(size_t index = it - m_FreeSuballocationsBySize.data();
8767  index < m_FreeSuballocationsBySize.size();
8768  ++index)
8769  {
8770  if(m_FreeSuballocationsBySize[index] == item)
8771  {
8772  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8773  return;
8774  }
8775  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8776  }
8777  VMA_ASSERT(0 && "Not found.");
8778  }
8779 
8780  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8781 }
8782 
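// Scans the used suballocations of this block, tracking the smallest
// allocation alignment seen and whether two consecutive used suballocations
// have types that conflict under bufferImageGranularity (linear vs.
// non-linear resources).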
8783 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8784  VkDeviceSize bufferImageGranularity,
8785  VmaSuballocationType& inOutPrevSuballocType) const
8786 {
8787  if(bufferImageGranularity == 1 || IsEmpty())
8788  {
8789  return false;
8790  }
8791 
8792  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8793  bool typeConflictFound = false;
8794  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8795  it != m_Suballocations.cend();
8796  ++it)
8797  {
8798  const VmaSuballocationType suballocType = it->type;
8799  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8800  {
8801  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8802  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8803  {
8804  typeConflictFound = true;
8805  }
8806  inOutPrevSuballocType = suballocType;
8807  }
8808  }
8809 
8810  return typeConflictFound || minAlignment >= bufferImageGranularity;
8811 }
8812 
8813 ////////////////////////////////////////////////////////////////////////////////
8814 // class VmaBlockMetadata_Linear
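// Metadata for a block managed with the linear algorithm. Suballocations are
// kept in two vectors: the 1st grows from the beginning of the block, while
// the 2nd is either empty, a ring buffer that wraps around to offset 0, or
// the upper side of a double stack growing down from the end of the block,
// as indicated by m_2ndVectorMode. Freed items become null entries, counted
// in m_1stNullItemsBeginCount, m_1stNullItemsMiddleCount and
// m_2ndNullItemsCount.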
8815 
8816 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8817  VmaBlockMetadata(hAllocator),
8818  m_SumFreeSize(0),
8819  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8820  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8821  m_1stVectorIndex(0),
8822  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8823  m_1stNullItemsBeginCount(0),
8824  m_1stNullItemsMiddleCount(0),
8825  m_2ndNullItemsCount(0)
8826 {
8827 }
8828 
8829 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8830 {
8831 }
8832 
8833 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8834 {
8835  VmaBlockMetadata::Init(size);
8836  m_SumFreeSize = size;
8837 }
8838 
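// Validates internal consistency of both vectors: null items appear only
// where the counters say they should, offsets strictly increase with at
// least VMA_DEBUG_MARGIN between neighbors, and m_SumFreeSize equals the
// block size minus the sum of used sizes.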
8839 bool VmaBlockMetadata_Linear::Validate() const
8840 {
8841  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8842  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8843 
8844  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8845  VMA_VALIDATE(!suballocations1st.empty() ||
8846  suballocations2nd.empty() ||
8847  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8848 
8849  if(!suballocations1st.empty())
8850  {
8851  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
8852  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8853  // Null item at the end should be just pop_back().
8854  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8855  }
8856  if(!suballocations2nd.empty())
8857  {
8858  // Null item at the end should be just pop_back().
8859  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8860  }
8861 
8862  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8863  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8864 
8865  VkDeviceSize sumUsedSize = 0;
8866  const size_t suballoc1stCount = suballocations1st.size();
8867  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8868 
8869  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8870  {
8871  const size_t suballoc2ndCount = suballocations2nd.size();
8872  size_t nullItem2ndCount = 0;
8873  for(size_t i = 0; i < suballoc2ndCount; ++i)
8874  {
8875  const VmaSuballocation& suballoc = suballocations2nd[i];
8876  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8877 
8878  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8879  VMA_VALIDATE(suballoc.offset >= offset);
8880 
8881  if(!currFree)
8882  {
8883  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8884  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8885  sumUsedSize += suballoc.size;
8886  }
8887  else
8888  {
8889  ++nullItem2ndCount;
8890  }
8891 
8892  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8893  }
8894 
8895  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8896  }
8897 
8898  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8899  {
8900  const VmaSuballocation& suballoc = suballocations1st[i];
8901  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8902  suballoc.hAllocation == VK_NULL_HANDLE);
8903  }
8904 
8905  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8906 
8907  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8908  {
8909  const VmaSuballocation& suballoc = suballocations1st[i];
8910  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8911 
8912  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8913  VMA_VALIDATE(suballoc.offset >= offset);
8914  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8915 
8916  if(!currFree)
8917  {
8918  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8919  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8920  sumUsedSize += suballoc.size;
8921  }
8922  else
8923  {
8924  ++nullItem1stCount;
8925  }
8926 
8927  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8928  }
8929  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8930 
8931  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8932  {
8933  const size_t suballoc2ndCount = suballocations2nd.size();
8934  size_t nullItem2ndCount = 0;
8935  for(size_t i = suballoc2ndCount; i--; )
8936  {
8937  const VmaSuballocation& suballoc = suballocations2nd[i];
8938  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8939 
8940  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8941  VMA_VALIDATE(suballoc.offset >= offset);
8942 
8943  if(!currFree)
8944  {
8945  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8946  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8947  sumUsedSize += suballoc.size;
8948  }
8949  else
8950  {
8951  ++nullItem2ndCount;
8952  }
8953 
8954  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8955  }
8956 
8957  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8958  }
8959 
8960  VMA_VALIDATE(offset <= GetSize());
8961  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8962 
8963  return true;
8964 }
8965 
8966 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8967 {
8968  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8969  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8970 }
8971 
8972 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8973 {
8974  const VkDeviceSize size = GetSize();
8975 
8976  /*
8977  We don't consider gaps inside allocation vectors with freed allocations because
8978  they are not suitable for reuse in a linear allocator. We consider only space that
8979  is available for new allocations.
8980  */
8981  if(IsEmpty())
8982  {
8983  return size;
8984  }
8985 
8986  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8987 
8988  switch(m_2ndVectorMode)
8989  {
8990  case SECOND_VECTOR_EMPTY:
8991  /*
8992  Available space is after end of 1st, as well as before beginning of 1st (which
8993  would make it a ring buffer).
8994  */
8995  {
8996  const size_t suballocations1stCount = suballocations1st.size();
8997  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8998  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8999  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9000  return VMA_MAX(
9001  firstSuballoc.offset,
9002  size - (lastSuballoc.offset + lastSuballoc.size));
9003  }
9004  break;
9005 
9006  case SECOND_VECTOR_RING_BUFFER:
9007  /*
9008  Available space is only between end of 2nd and beginning of 1st.
9009  */
9010  {
9011  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9012  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9013  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9014  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9015  }
9016  break;
9017 
9018  case SECOND_VECTOR_DOUBLE_STACK:
9019  /*
9020  Available space is only between end of 1st and top of 2nd.
9021  */
9022  {
9023  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9024  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9025  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9026  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9027  }
9028  break;
9029 
9030  default:
9031  VMA_ASSERT(0);
9032  return 0;
9033  }
9034 }
9035 
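// Fills outInfo by walking the block in address order: first the 2nd vector
// when it acts as a ring buffer (low offsets), then the 1st vector, then the
// 2nd vector again when it forms the upper side of a double stack (high
// offsets). Gaps between used allocations are counted as unused ranges.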
9036 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9037 {
9038  const VkDeviceSize size = GetSize();
9039  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9040  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9041  const size_t suballoc1stCount = suballocations1st.size();
9042  const size_t suballoc2ndCount = suballocations2nd.size();
9043 
9044  outInfo.blockCount = 1;
9045  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9046  outInfo.unusedRangeCount = 0;
9047  outInfo.usedBytes = 0;
      outInfo.unusedBytes = 0; // Accumulated below, recomputed from usedBytes at the end.
9048  outInfo.allocationSizeMin = UINT64_MAX;
9049  outInfo.allocationSizeMax = 0;
9050  outInfo.unusedRangeSizeMin = UINT64_MAX;
9051  outInfo.unusedRangeSizeMax = 0;
9052 
9053  VkDeviceSize lastOffset = 0;
9054 
9055  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9056  {
9057  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9058  size_t nextAlloc2ndIndex = 0;
9059  while(lastOffset < freeSpace2ndTo1stEnd)
9060  {
9061  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9062  while(nextAlloc2ndIndex < suballoc2ndCount &&
9063  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9064  {
9065  ++nextAlloc2ndIndex;
9066  }
9067 
9068  // Found non-null allocation.
9069  if(nextAlloc2ndIndex < suballoc2ndCount)
9070  {
9071  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9072 
9073  // 1. Process free space before this allocation.
9074  if(lastOffset < suballoc.offset)
9075  {
9076  // There is free space from lastOffset to suballoc.offset.
9077  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9078  ++outInfo.unusedRangeCount;
9079  outInfo.unusedBytes += unusedRangeSize;
9080  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9081  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9082  }
9083 
9084  // 2. Process this allocation.
9085  // There is allocation with suballoc.offset, suballoc.size.
9086  outInfo.usedBytes += suballoc.size;
9087  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9088  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9089 
9090  // 3. Prepare for next iteration.
9091  lastOffset = suballoc.offset + suballoc.size;
9092  ++nextAlloc2ndIndex;
9093  }
9094  // We are at the end.
9095  else
9096  {
9097  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9098  if(lastOffset < freeSpace2ndTo1stEnd)
9099  {
9100  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9101  ++outInfo.unusedRangeCount;
9102  outInfo.unusedBytes += unusedRangeSize;
9103  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9104  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9105  }
9106 
9107  // End of loop.
9108  lastOffset = freeSpace2ndTo1stEnd;
9109  }
9110  }
9111  }
9112 
9113  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9114  const VkDeviceSize freeSpace1stTo2ndEnd =
9115  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9116  while(lastOffset < freeSpace1stTo2ndEnd)
9117  {
9118  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9119  while(nextAlloc1stIndex < suballoc1stCount &&
9120  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9121  {
9122  ++nextAlloc1stIndex;
9123  }
9124 
9125  // Found non-null allocation.
9126  if(nextAlloc1stIndex < suballoc1stCount)
9127  {
9128  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9129 
9130  // 1. Process free space before this allocation.
9131  if(lastOffset < suballoc.offset)
9132  {
9133  // There is free space from lastOffset to suballoc.offset.
9134  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9135  ++outInfo.unusedRangeCount;
9136  outInfo.unusedBytes += unusedRangeSize;
9137  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9138  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9139  }
9140 
9141  // 2. Process this allocation.
9142  // There is allocation with suballoc.offset, suballoc.size.
9143  outInfo.usedBytes += suballoc.size;
9144  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9145  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9146 
9147  // 3. Prepare for next iteration.
9148  lastOffset = suballoc.offset + suballoc.size;
9149  ++nextAlloc1stIndex;
9150  }
9151  // We are at the end.
9152  else
9153  {
9154  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9155  if(lastOffset < freeSpace1stTo2ndEnd)
9156  {
9157  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9158  ++outInfo.unusedRangeCount;
9159  outInfo.unusedBytes += unusedRangeSize;
9160  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9161  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9162  }
9163 
9164  // End of loop.
9165  lastOffset = freeSpace1stTo2ndEnd;
9166  }
9167  }
9168 
9169  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9170  {
9171  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9172  while(lastOffset < size)
9173  {
9174  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9175  while(nextAlloc2ndIndex != SIZE_MAX &&
9176  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9177  {
9178  --nextAlloc2ndIndex;
9179  }
9180 
9181  // Found non-null allocation.
9182  if(nextAlloc2ndIndex != SIZE_MAX)
9183  {
9184  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9185 
9186  // 1. Process free space before this allocation.
9187  if(lastOffset < suballoc.offset)
9188  {
9189  // There is free space from lastOffset to suballoc.offset.
9190  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9191  ++outInfo.unusedRangeCount;
9192  outInfo.unusedBytes += unusedRangeSize;
9193  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9194  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9195  }
9196 
9197  // 2. Process this allocation.
9198  // There is allocation with suballoc.offset, suballoc.size.
9199  outInfo.usedBytes += suballoc.size;
9200  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9201  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9202 
9203  // 3. Prepare for next iteration.
9204  lastOffset = suballoc.offset + suballoc.size;
9205  --nextAlloc2ndIndex;
9206  }
9207  // We are at the end.
9208  else
9209  {
9210  // There is free space from lastOffset to size.
9211  if(lastOffset < size)
9212  {
9213  const VkDeviceSize unusedRangeSize = size - lastOffset;
9214  ++outInfo.unusedRangeCount;
9215  outInfo.unusedBytes += unusedRangeSize;
9216  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9217  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9218  }
9219 
9220  // End of loop.
9221  lastOffset = size;
9222  }
9223  }
9224  }
9225 
9226  outInfo.unusedBytes = size - outInfo.usedBytes;
9227 }
9228 
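// Accumulates pool-level statistics using the same address-order walk as
// CalcAllocationStatInfo above, tracking only the allocation count, unused
// byte total and the largest unused range.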
9229 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9230 {
9231  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9232  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9233  const VkDeviceSize size = GetSize();
9234  const size_t suballoc1stCount = suballocations1st.size();
9235  const size_t suballoc2ndCount = suballocations2nd.size();
9236 
9237  inoutStats.size += size;
9238 
9239  VkDeviceSize lastOffset = 0;
9240 
9241  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9242  {
9243  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9244  size_t nextAlloc2ndIndex = 0;
9245  while(lastOffset < freeSpace2ndTo1stEnd)
9246  {
9247  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9248  while(nextAlloc2ndIndex < suballoc2ndCount &&
9249  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9250  {
9251  ++nextAlloc2ndIndex;
9252  }
9253 
9254  // Found non-null allocation.
9255  if(nextAlloc2ndIndex < suballoc2ndCount)
9256  {
9257  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9258 
9259  // 1. Process free space before this allocation.
9260  if(lastOffset < suballoc.offset)
9261  {
9262  // There is free space from lastOffset to suballoc.offset.
9263  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9264  inoutStats.unusedSize += unusedRangeSize;
9265  ++inoutStats.unusedRangeCount;
9266  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9267  }
9268 
9269  // 2. Process this allocation.
9270  // There is allocation with suballoc.offset, suballoc.size.
9271  ++inoutStats.allocationCount;
9272 
9273  // 3. Prepare for next iteration.
9274  lastOffset = suballoc.offset + suballoc.size;
9275  ++nextAlloc2ndIndex;
9276  }
9277  // We are at the end.
9278  else
9279  {
9280  if(lastOffset < freeSpace2ndTo1stEnd)
9281  {
9282  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9283  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9284  inoutStats.unusedSize += unusedRangeSize;
9285  ++inoutStats.unusedRangeCount;
9286  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9287  }
9288 
9289  // End of loop.
9290  lastOffset = freeSpace2ndTo1stEnd;
9291  }
9292  }
9293  }
9294 
9295  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9296  const VkDeviceSize freeSpace1stTo2ndEnd =
9297  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9298  while(lastOffset < freeSpace1stTo2ndEnd)
9299  {
9300  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9301  while(nextAlloc1stIndex < suballoc1stCount &&
9302  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9303  {
9304  ++nextAlloc1stIndex;
9305  }
9306 
9307  // Found non-null allocation.
9308  if(nextAlloc1stIndex < suballoc1stCount)
9309  {
9310  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9311 
9312  // 1. Process free space before this allocation.
9313  if(lastOffset < suballoc.offset)
9314  {
9315  // There is free space from lastOffset to suballoc.offset.
9316  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9317  inoutStats.unusedSize += unusedRangeSize;
9318  ++inoutStats.unusedRangeCount;
9319  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9320  }
9321 
9322  // 2. Process this allocation.
9323  // There is allocation with suballoc.offset, suballoc.size.
9324  ++inoutStats.allocationCount;
9325 
9326  // 3. Prepare for next iteration.
9327  lastOffset = suballoc.offset + suballoc.size;
9328  ++nextAlloc1stIndex;
9329  }
9330  // We are at the end.
9331  else
9332  {
9333  if(lastOffset < freeSpace1stTo2ndEnd)
9334  {
9335  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9336  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9337  inoutStats.unusedSize += unusedRangeSize;
9338  ++inoutStats.unusedRangeCount;
9339  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9340  }
9341 
9342  // End of loop.
9343  lastOffset = freeSpace1stTo2ndEnd;
9344  }
9345  }
9346 
9347  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9348  {
9349  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9350  while(lastOffset < size)
9351  {
9352  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9353  while(nextAlloc2ndIndex != SIZE_MAX &&
9354  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9355  {
9356  --nextAlloc2ndIndex;
9357  }
9358 
9359  // Found non-null allocation.
9360  if(nextAlloc2ndIndex != SIZE_MAX)
9361  {
9362  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9363 
9364  // 1. Process free space before this allocation.
9365  if(lastOffset < suballoc.offset)
9366  {
9367  // There is free space from lastOffset to suballoc.offset.
9368  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9369  inoutStats.unusedSize += unusedRangeSize;
9370  ++inoutStats.unusedRangeCount;
9371  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9372  }
9373 
9374  // 2. Process this allocation.
9375  // There is allocation with suballoc.offset, suballoc.size.
9376  ++inoutStats.allocationCount;
9377 
9378  // 3. Prepare for next iteration.
9379  lastOffset = suballoc.offset + suballoc.size;
9380  --nextAlloc2ndIndex;
9381  }
9382  // We are at the end.
9383  else
9384  {
9385  if(lastOffset < size)
9386  {
9387  // There is free space from lastOffset to size.
9388  const VkDeviceSize unusedRangeSize = size - lastOffset;
9389  inoutStats.unusedSize += unusedRangeSize;
9390  ++inoutStats.unusedRangeCount;
9391  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9392  }
9393 
9394  // End of loop.
9395  lastOffset = size;
9396  }
9397  }
9398  }
9399 }
9400 
9401 #if VMA_STATS_STRING_ENABLED
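// Writes this block's layout as JSON. The first pass only counts used
// allocations, used bytes and unused ranges so that PrintDetailedMap_Begin
// receives correct totals; the second pass repeats the same walk and emits
// every allocation and unused range.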
9402 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9403 {
9404  const VkDeviceSize size = GetSize();
9405  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9406  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9407  const size_t suballoc1stCount = suballocations1st.size();
9408  const size_t suballoc2ndCount = suballocations2nd.size();
9409 
9410  // FIRST PASS
9411 
9412  size_t unusedRangeCount = 0;
9413  VkDeviceSize usedBytes = 0;
9414 
9415  VkDeviceSize lastOffset = 0;
9416 
9417  size_t alloc2ndCount = 0;
9418  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9419  {
9420  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9421  size_t nextAlloc2ndIndex = 0;
9422  while(lastOffset < freeSpace2ndTo1stEnd)
9423  {
9424  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9425  while(nextAlloc2ndIndex < suballoc2ndCount &&
9426  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9427  {
9428  ++nextAlloc2ndIndex;
9429  }
9430 
9431  // Found non-null allocation.
9432  if(nextAlloc2ndIndex < suballoc2ndCount)
9433  {
9434  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9435 
9436  // 1. Process free space before this allocation.
9437  if(lastOffset < suballoc.offset)
9438  {
9439  // There is free space from lastOffset to suballoc.offset.
9440  ++unusedRangeCount;
9441  }
9442 
9443  // 2. Process this allocation.
9444  // There is allocation with suballoc.offset, suballoc.size.
9445  ++alloc2ndCount;
9446  usedBytes += suballoc.size;
9447 
9448  // 3. Prepare for next iteration.
9449  lastOffset = suballoc.offset + suballoc.size;
9450  ++nextAlloc2ndIndex;
9451  }
9452  // We are at the end.
9453  else
9454  {
9455  if(lastOffset < freeSpace2ndTo1stEnd)
9456  {
9457  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9458  ++unusedRangeCount;
9459  }
9460 
9461  // End of loop.
9462  lastOffset = freeSpace2ndTo1stEnd;
9463  }
9464  }
9465  }
9466 
9467  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9468  size_t alloc1stCount = 0;
9469  const VkDeviceSize freeSpace1stTo2ndEnd =
9470  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9471  while(lastOffset < freeSpace1stTo2ndEnd)
9472  {
9473  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9474  while(nextAlloc1stIndex < suballoc1stCount &&
9475  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9476  {
9477  ++nextAlloc1stIndex;
9478  }
9479 
9480  // Found non-null allocation.
9481  if(nextAlloc1stIndex < suballoc1stCount)
9482  {
9483  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9484 
9485  // 1. Process free space before this allocation.
9486  if(lastOffset < suballoc.offset)
9487  {
9488  // There is free space from lastOffset to suballoc.offset.
9489  ++unusedRangeCount;
9490  }
9491 
9492  // 2. Process this allocation.
9493  // There is allocation with suballoc.offset, suballoc.size.
9494  ++alloc1stCount;
9495  usedBytes += suballoc.size;
9496 
9497  // 3. Prepare for next iteration.
9498  lastOffset = suballoc.offset + suballoc.size;
9499  ++nextAlloc1stIndex;
9500  }
9501  // We are at the end.
9502  else
9503  {
9504  if(lastOffset < freeSpace1stTo2ndEnd)
9505  {
9506  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9507  ++unusedRangeCount;
9508  }
9509 
9510  // End of loop.
9511  lastOffset = freeSpace1stTo2ndEnd;
9512  }
9513  }
9514 
9515  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9516  {
9517  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9518  while(lastOffset < size)
9519  {
9520  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9521  while(nextAlloc2ndIndex != SIZE_MAX &&
9522  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9523  {
9524  --nextAlloc2ndIndex;
9525  }
9526 
9527  // Found non-null allocation.
9528  if(nextAlloc2ndIndex != SIZE_MAX)
9529  {
9530  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9531 
9532  // 1. Process free space before this allocation.
9533  if(lastOffset < suballoc.offset)
9534  {
9535  // There is free space from lastOffset to suballoc.offset.
9536  ++unusedRangeCount;
9537  }
9538 
9539  // 2. Process this allocation.
9540  // There is allocation with suballoc.offset, suballoc.size.
9541  ++alloc2ndCount;
9542  usedBytes += suballoc.size;
9543 
9544  // 3. Prepare for next iteration.
9545  lastOffset = suballoc.offset + suballoc.size;
9546  --nextAlloc2ndIndex;
9547  }
9548  // We are at the end.
9549  else
9550  {
9551  if(lastOffset < size)
9552  {
9553  // There is free space from lastOffset to size.
9554  ++unusedRangeCount;
9555  }
9556 
9557  // End of loop.
9558  lastOffset = size;
9559  }
9560  }
9561  }
9562 
9563  const VkDeviceSize unusedBytes = size - usedBytes;
9564  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9565 
9566  // SECOND PASS
9567  lastOffset = 0;
9568 
9569  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9570  {
9571  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9572  size_t nextAlloc2ndIndex = 0;
9573  while(lastOffset < freeSpace2ndTo1stEnd)
9574  {
9575  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9576  while(nextAlloc2ndIndex < suballoc2ndCount &&
9577  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9578  {
9579  ++nextAlloc2ndIndex;
9580  }
9581 
9582  // Found non-null allocation.
9583  if(nextAlloc2ndIndex < suballoc2ndCount)
9584  {
9585  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9586 
9587  // 1. Process free space before this allocation.
9588  if(lastOffset < suballoc.offset)
9589  {
9590  // There is free space from lastOffset to suballoc.offset.
9591  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9592  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9593  }
9594 
9595  // 2. Process this allocation.
9596  // There is allocation with suballoc.offset, suballoc.size.
9597  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9598 
9599  // 3. Prepare for next iteration.
9600  lastOffset = suballoc.offset + suballoc.size;
9601  ++nextAlloc2ndIndex;
9602  }
9603  // We are at the end.
9604  else
9605  {
9606  if(lastOffset < freeSpace2ndTo1stEnd)
9607  {
9608  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9609  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9610  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9611  }
9612 
9613  // End of loop.
9614  lastOffset = freeSpace2ndTo1stEnd;
9615  }
9616  }
9617  }
9618 
9619  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9620  while(lastOffset < freeSpace1stTo2ndEnd)
9621  {
9622  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9623  while(nextAlloc1stIndex < suballoc1stCount &&
9624  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9625  {
9626  ++nextAlloc1stIndex;
9627  }
9628 
9629  // Found non-null allocation.
9630  if(nextAlloc1stIndex < suballoc1stCount)
9631  {
9632  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9633 
9634  // 1. Process free space before this allocation.
9635  if(lastOffset < suballoc.offset)
9636  {
9637  // There is free space from lastOffset to suballoc.offset.
9638  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9639  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9640  }
9641 
9642  // 2. Process this allocation.
9643  // There is allocation with suballoc.offset, suballoc.size.
9644  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9645 
9646  // 3. Prepare for next iteration.
9647  lastOffset = suballoc.offset + suballoc.size;
9648  ++nextAlloc1stIndex;
9649  }
9650  // We are at the end.
9651  else
9652  {
9653  if(lastOffset < freeSpace1stTo2ndEnd)
9654  {
9655  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9656  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9657  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9658  }
9659 
9660  // End of loop.
9661  lastOffset = freeSpace1stTo2ndEnd;
9662  }
9663  }
9664 
9665  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9666  {
9667  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9668  while(lastOffset < size)
9669  {
9670  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9671  while(nextAlloc2ndIndex != SIZE_MAX &&
9672  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9673  {
9674  --nextAlloc2ndIndex;
9675  }
9676 
9677  // Found non-null allocation.
9678  if(nextAlloc2ndIndex != SIZE_MAX)
9679  {
9680  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9681 
9682  // 1. Process free space before this allocation.
9683  if(lastOffset < suballoc.offset)
9684  {
9685  // There is free space from lastOffset to suballoc.offset.
9686  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9687  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9688  }
9689 
9690  // 2. Process this allocation.
9691  // There is allocation with suballoc.offset, suballoc.size.
9692  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9693 
9694  // 3. Prepare for next iteration.
9695  lastOffset = suballoc.offset + suballoc.size;
9696  --nextAlloc2ndIndex;
9697  }
9698  // We are at the end.
9699  else
9700  {
9701  if(lastOffset < size)
9702  {
9703  // There is free space from lastOffset to size.
9704  const VkDeviceSize unusedRangeSize = size - lastOffset;
9705  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9706  }
9707 
9708  // End of loop.
9709  lastOffset = size;
9710  }
9711  }
9712  }
9713 
9714  PrintDetailedMap_End(json);
9715 }
9716 #endif // #if VMA_STATS_STRING_ENABLED
9717 
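// Entry point of the allocation search for the linear algorithm. Dispatches
// to the upper-address path (top of the double stack) or the lower-address
// path (end of the 1st vector, with optional wrap-around into a ring buffer)
// depending on the upperAddress flag.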
9718 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9719  uint32_t currentFrameIndex,
9720  uint32_t frameInUseCount,
9721  VkDeviceSize bufferImageGranularity,
9722  VkDeviceSize allocSize,
9723  VkDeviceSize allocAlignment,
9724  bool upperAddress,
9725  VmaSuballocationType allocType,
9726  bool canMakeOtherLost,
9727  uint32_t strategy,
9728  VmaAllocationRequest* pAllocationRequest)
9729 {
9730  VMA_ASSERT(allocSize > 0);
9731  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9732  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9733  VMA_HEAVY_ASSERT(Validate());
9734  return upperAddress ?
9735  CreateAllocationRequest_UpperAddress(
9736  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9737  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9738  CreateAllocationRequest_LowerAddress(
9739  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9740  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9741 }
9742 
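// Tries to place the allocation at the highest free address: just below
// 2nd.back() when the double stack is already in use, otherwise at the very
// end of the block. Debug margin and alignment are applied downward, and the
// request fails if the result would collide with the end of the 1st vector
// or violate bufferImageGranularity against existing suballocations.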
9743 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
9744  uint32_t currentFrameIndex,
9745  uint32_t frameInUseCount,
9746  VkDeviceSize bufferImageGranularity,
9747  VkDeviceSize allocSize,
9748  VkDeviceSize allocAlignment,
9749  VmaSuballocationType allocType,
9750  bool canMakeOtherLost,
9751  uint32_t strategy,
9752  VmaAllocationRequest* pAllocationRequest)
9753 {
9754  const VkDeviceSize size = GetSize();
9755  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9756  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9757 
9758  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9759  {
9760  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9761  return false;
9762  }
9763 
9764  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9765  if(allocSize > size)
9766  {
9767  return false;
9768  }
9769  VkDeviceSize resultBaseOffset = size - allocSize;
9770  if(!suballocations2nd.empty())
9771  {
9772  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9773  resultBaseOffset = lastSuballoc.offset - allocSize;
9774  if(allocSize > lastSuballoc.offset)
9775  {
9776  return false;
9777  }
9778  }
9779 
9780  // Start from offset equal to end of free space.
9781  VkDeviceSize resultOffset = resultBaseOffset;
9782 
9783  // Apply VMA_DEBUG_MARGIN at the end.
9784  if(VMA_DEBUG_MARGIN > 0)
9785  {
9786  if(resultOffset < VMA_DEBUG_MARGIN)
9787  {
9788  return false;
9789  }
9790  resultOffset -= VMA_DEBUG_MARGIN;
9791  }
9792 
9793  // Apply alignment.
9794  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9795 
9796  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9797  // Make bigger alignment if necessary.
9798  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9799  {
9800  bool bufferImageGranularityConflict = false;
9801  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9802  {
9803  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9804  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9805  {
9806  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9807  {
9808  bufferImageGranularityConflict = true;
9809  break;
9810  }
9811  }
9812  else
9813  // Already on previous page.
9814  break;
9815  }
9816  if(bufferImageGranularityConflict)
9817  {
9818  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9819  }
9820  }
9821 
9822  // There is enough free space.
9823  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9824  suballocations1st.back().offset + suballocations1st.back().size :
9825  0;
9826  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9827  {
9828  // Check previous suballocations for BufferImageGranularity conflicts.
9829  // If conflict exists, allocation cannot be made here.
9830  if(bufferImageGranularity > 1)
9831  {
9832  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9833  {
9834  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9835  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9836  {
9837  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9838  {
9839  return false;
9840  }
9841  }
9842  else
9843  {
9844  // Already on next page.
9845  break;
9846  }
9847  }
9848  }
9849 
9850  // All tests passed: Success.
9851  pAllocationRequest->offset = resultOffset;
9852  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9853  pAllocationRequest->sumItemSize = 0;
9854  // pAllocationRequest->item unused.
9855  pAllocationRequest->itemsToMakeLostCount = 0;
9856  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
9857  return true;
9858  }
9859 
9860  return false;
9861 }
9862 
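// Tries to place the allocation after the last item of the 1st vector,
// bounded by the bottom of the double stack when one exists. If that fails
// and the 2nd vector is empty or already a ring buffer, wraps around and
// tries the space between the end of the 2nd vector and the beginning of the
// 1st, optionally making colliding allocations at the start of the 1st
// vector lost.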
9863 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
9864  uint32_t currentFrameIndex,
9865  uint32_t frameInUseCount,
9866  VkDeviceSize bufferImageGranularity,
9867  VkDeviceSize allocSize,
9868  VkDeviceSize allocAlignment,
9869  VmaSuballocationType allocType,
9870  bool canMakeOtherLost,
9871  uint32_t strategy,
9872  VmaAllocationRequest* pAllocationRequest)
9873 {
9874  const VkDeviceSize size = GetSize();
9875  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9876  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9877 
9878  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9879  {
9880  // Try to allocate at the end of 1st vector.
9881 
9882  VkDeviceSize resultBaseOffset = 0;
9883  if(!suballocations1st.empty())
9884  {
9885  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9886  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9887  }
9888 
9889  // Start from offset equal to beginning of free space.
9890  VkDeviceSize resultOffset = resultBaseOffset;
9891 
9892  // Apply VMA_DEBUG_MARGIN at the beginning.
9893  if(VMA_DEBUG_MARGIN > 0)
9894  {
9895  resultOffset += VMA_DEBUG_MARGIN;
9896  }
9897 
9898  // Apply alignment.
9899  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9900 
9901  // Check previous suballocations for BufferImageGranularity conflicts.
9902  // Make bigger alignment if necessary.
9903  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9904  {
9905  bool bufferImageGranularityConflict = false;
9906  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9907  {
9908  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9909  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9910  {
9911  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9912  {
9913  bufferImageGranularityConflict = true;
9914  break;
9915  }
9916  }
9917  else
9918  // Already on previous page.
9919  break;
9920  }
9921  if(bufferImageGranularityConflict)
9922  {
9923  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9924  }
9925  }
9926 
9927  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9928  suballocations2nd.back().offset : size;
9929 
9930  // There is enough free space at the end after alignment.
9931  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9932  {
9933  // Check next suballocations for BufferImageGranularity conflicts.
9934  // If conflict exists, allocation cannot be made here.
9935  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9936  {
9937  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9938  {
9939  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9940  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9941  {
9942  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9943  {
9944  return false;
9945  }
9946  }
9947  else
9948  {
9949  // Already on next page.
9950  break;
9951  }
9952  }
9953  }
9954 
9955  // All tests passed: Success.
9956  pAllocationRequest->offset = resultOffset;
9957  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9958  pAllocationRequest->sumItemSize = 0;
9959  // pAllocationRequest->item, customData unused.
9960  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
9961  pAllocationRequest->itemsToMakeLostCount = 0;
9962  return true;
9963  }
9964  }
9965 
9966  // Wrap-around to end of 2nd vector. Try to allocate there, treating the
9967  // beginning of 1st vector as the end of free space.
9968  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9969  {
9970  VMA_ASSERT(!suballocations1st.empty());
9971 
9972  VkDeviceSize resultBaseOffset = 0;
9973  if(!suballocations2nd.empty())
9974  {
9975  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9976  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9977  }
9978 
9979  // Start from offset equal to beginning of free space.
9980  VkDeviceSize resultOffset = resultBaseOffset;
9981 
9982  // Apply VMA_DEBUG_MARGIN at the beginning.
9983  if(VMA_DEBUG_MARGIN > 0)
9984  {
9985  resultOffset += VMA_DEBUG_MARGIN;
9986  }
9987 
9988  // Apply alignment.
9989  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9990 
9991  // Check previous suballocations for BufferImageGranularity conflicts.
9992  // Make bigger alignment if necessary.
9993  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9994  {
9995  bool bufferImageGranularityConflict = false;
9996  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9997  {
9998  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9999  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10000  {
10001  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10002  {
10003  bufferImageGranularityConflict = true;
10004  break;
10005  }
10006  }
10007  else
10008  // Already on previous page.
10009  break;
10010  }
10011  if(bufferImageGranularityConflict)
10012  {
10013  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10014  }
10015  }
10016 
10017  pAllocationRequest->itemsToMakeLostCount = 0;
10018  pAllocationRequest->sumItemSize = 0;
10019  size_t index1st = m_1stNullItemsBeginCount;
10020 
10021  if(canMakeOtherLost)
10022  {
10023  while(index1st < suballocations1st.size() &&
10024  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10025  {
10026  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10027  const VmaSuballocation& suballoc = suballocations1st[index1st];
10028  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10029  {
10030  // No problem.
10031  }
10032  else
10033  {
10034  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10035  if(suballoc.hAllocation->CanBecomeLost() &&
10036  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10037  {
10038  ++pAllocationRequest->itemsToMakeLostCount;
10039  pAllocationRequest->sumItemSize += suballoc.size;
10040  }
10041  else
10042  {
10043  return false;
10044  }
10045  }
10046  ++index1st;
10047  }
10048 
10049  // Check next suballocations for BufferImageGranularity conflicts.
10050  // If conflict exists, we must mark more allocations lost or fail.
10051  if(bufferImageGranularity > 1)
10052  {
10053  while(index1st < suballocations1st.size())
10054  {
10055  const VmaSuballocation& suballoc = suballocations1st[index1st];
10056  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10057  {
10058  if(suballoc.hAllocation != VK_NULL_HANDLE)
10059  {
10060  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10061  if(suballoc.hAllocation->CanBecomeLost() &&
10062  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10063  {
10064  ++pAllocationRequest->itemsToMakeLostCount;
10065  pAllocationRequest->sumItemSize += suballoc.size;
10066  }
10067  else
10068  {
10069  return false;
10070  }
10071  }
10072  }
10073  else
10074  {
10075  // Already on next page.
10076  break;
10077  }
10078  ++index1st;
10079  }
10080  }
10081 
10082  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
10083  if(index1st == suballocations1st.size() &&
10084  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10085  {
10086  // TODO: Known limitation: this case is not implemented yet, so the allocation fails.
10087  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10088  }
10089  }
10090 
10091  // There is enough free space at the end after alignment.
10092  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10093  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10094  {
10095  // Check next suballocations for BufferImageGranularity conflicts.
10096  // If conflict exists, allocation cannot be made here.
10097  if(bufferImageGranularity > 1)
10098  {
10099  for(size_t nextSuballocIndex = index1st;
10100  nextSuballocIndex < suballocations1st.size();
10101  nextSuballocIndex++)
10102  {
10103  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10104  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10105  {
10106  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10107  {
10108  return false;
10109  }
10110  }
10111  else
10112  {
10113  // Already on next page.
10114  break;
10115  }
10116  }
10117  }
10118 
10119  // All tests passed: Success.
10120  pAllocationRequest->offset = resultOffset;
10121  pAllocationRequest->sumFreeSize =
10122  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10123  - resultBaseOffset
10124  - pAllocationRequest->sumItemSize;
10125  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10126  // pAllocationRequest->item, customData unused.
10127  return true;
10128  }
10129  }
10130 
10131  return false;
10132 }
10133 
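// MakeRequestedAllocationsLost() walks the ring buffer starting at the
// beginning of the 1st vector and wraps into the 2nd vector when needed,
// marking itemsToMakeLostCount allocations as lost. It returns false as soon
// as one of them can no longer be made lost.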
10134 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10135  uint32_t currentFrameIndex,
10136  uint32_t frameInUseCount,
10137  VmaAllocationRequest* pAllocationRequest)
10138 {
10139  if(pAllocationRequest->itemsToMakeLostCount == 0)
10140  {
10141  return true;
10142  }
10143 
10144  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10145 
10146  // We always start from 1st.
10147  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10148  size_t index = m_1stNullItemsBeginCount;
10149  size_t madeLostCount = 0;
10150  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10151  {
10152  if(index == suballocations->size())
10153  {
10154  index = 0;
10155  // If we get to the end of 1st vector, we wrap around to the beginning of 2nd vector.
10156  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10157  {
10158  suballocations = &AccessSuballocations2nd();
10159  }
10160  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10161  // suballocations continues pointing at AccessSuballocations1st().
10162  VMA_ASSERT(!suballocations->empty());
10163  }
10164  VmaSuballocation& suballoc = (*suballocations)[index];
10165  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10166  {
10167  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10168  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10169  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10170  {
10171  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10172  suballoc.hAllocation = VK_NULL_HANDLE;
10173  m_SumFreeSize += suballoc.size;
10174  if(suballocations == &AccessSuballocations1st())
10175  {
10176  ++m_1stNullItemsMiddleCount;
10177  }
10178  else
10179  {
10180  ++m_2ndNullItemsCount;
10181  }
10182  ++madeLostCount;
10183  }
10184  else
10185  {
10186  return false;
10187  }
10188  }
10189  ++index;
10190  }
10191 
10192  CleanupAfterFree();
10193  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10194 
10195  return true;
10196 }
10197 
10198 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10199 {
10200  uint32_t lostAllocationCount = 0;
10201 
10202  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10203  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10204  {
10205  VmaSuballocation& suballoc = suballocations1st[i];
10206  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10207  suballoc.hAllocation->CanBecomeLost() &&
10208  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10209  {
10210  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10211  suballoc.hAllocation = VK_NULL_HANDLE;
10212  ++m_1stNullItemsMiddleCount;
10213  m_SumFreeSize += suballoc.size;
10214  ++lostAllocationCount;
10215  }
10216  }
10217 
10218  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10219  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10220  {
10221  VmaSuballocation& suballoc = suballocations2nd[i];
10222  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10223  suballoc.hAllocation->CanBecomeLost() &&
10224  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10225  {
10226  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10227  suballoc.hAllocation = VK_NULL_HANDLE;
10228  ++m_2ndNullItemsCount;
10229  m_SumFreeSize += suballoc.size;
10230  ++lostAllocationCount;
10231  }
10232  }
10233 
10234  if(lostAllocationCount)
10235  {
10236  CleanupAfterFree();
10237  }
10238 
10239  return lostAllocationCount;
10240 }
10241 
10242 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10243 {
10244  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10245  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10246  {
10247  const VmaSuballocation& suballoc = suballocations1st[i];
10248  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10249  {
10250  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10251  {
10252  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10253  return VK_ERROR_VALIDATION_FAILED_EXT;
10254  }
10255  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10256  {
10257  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10258  return VK_ERROR_VALIDATION_FAILED_EXT;
10259  }
10260  }
10261  }
10262 
10263  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10264  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10265  {
10266  const VmaSuballocation& suballoc = suballocations2nd[i];
10267  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10268  {
10269  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10270  {
10271  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10272  return VK_ERROR_VALIDATION_FAILED_EXT;
10273  }
10274  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10275  {
10276  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10277  return VK_ERROR_VALIDATION_FAILED_EXT;
10278  }
10279  }
10280  }
10281 
10282  return VK_SUCCESS;
10283 }
10284 
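// Alloc() dispatches on the request type computed by CreateAllocationRequest:
// UpperAddress pushes onto the 2nd vector used as the upper stack of a double
// stack, EndOf1st appends to the 1st vector, and EndOf2nd appends to the 2nd
// vector used as the wrapped-around part of a ring buffer.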
10285 void VmaBlockMetadata_Linear::Alloc(
10286  const VmaAllocationRequest& request,
10287  VmaSuballocationType type,
10288  VkDeviceSize allocSize,
10289  VmaAllocation hAllocation)
10290 {
10291  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10292 
10293  switch(request.type)
10294  {
10295  case VmaAllocationRequestType::UpperAddress:
10296  {
10297  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10298  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10299  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10300  suballocations2nd.push_back(newSuballoc);
10301  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10302  }
10303  break;
10304  case VmaAllocationRequestType::EndOf1st:
10305  {
10306  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10307 
10308  VMA_ASSERT(suballocations1st.empty() ||
10309  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10310  // Check if it fits before the end of the block.
10311  VMA_ASSERT(request.offset + allocSize <= GetSize());
10312 
10313  suballocations1st.push_back(newSuballoc);
10314  }
10315  break;
10316  case VmaAllocationRequestType::EndOf2nd:
10317  {
10318  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10319  // New allocation at the end of 2-part ring buffer, so it must end before the first allocation from 1st vector.
10320  VMA_ASSERT(!suballocations1st.empty() &&
10321  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10322  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10323 
10324  switch(m_2ndVectorMode)
10325  {
10326  case SECOND_VECTOR_EMPTY:
10327  // First allocation from second part ring buffer.
10328  VMA_ASSERT(suballocations2nd.empty());
10329  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10330  break;
10331  case SECOND_VECTOR_RING_BUFFER:
10332  // 2-part ring buffer is already started.
10333  VMA_ASSERT(!suballocations2nd.empty());
10334  break;
10335  case SECOND_VECTOR_DOUBLE_STACK:
10336  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10337  break;
10338  default:
10339  VMA_ASSERT(0);
10340  }
10341 
10342  suballocations2nd.push_back(newSuballoc);
10343  }
10344  break;
10345  default:
10346  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10347  }
10348 
10349  m_SumFreeSize -= newSuballoc.size;
10350 }
10351 
10352 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10353 {
10354  FreeAtOffset(allocation->GetOffset());
10355 }
10356 
10357 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10358 {
10359  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10360  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10361 
10362  if(!suballocations1st.empty())
10363  {
10364  // Freeing the first live allocation in 1st vector: mark it as empty at the beginning.
10365  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10366  if(firstSuballoc.offset == offset)
10367  {
10368  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10369  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10370  m_SumFreeSize += firstSuballoc.size;
10371  ++m_1stNullItemsBeginCount;
10372  CleanupAfterFree();
10373  return;
10374  }
10375  }
10376 
10377  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10378  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10379  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10380  {
10381  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10382  if(lastSuballoc.offset == offset)
10383  {
10384  m_SumFreeSize += lastSuballoc.size;
10385  suballocations2nd.pop_back();
10386  CleanupAfterFree();
10387  return;
10388  }
10389  }
10390  // Last allocation in 1st vector.
10391  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10392  {
10393  VmaSuballocation& lastSuballoc = suballocations1st.back();
10394  if(lastSuballoc.offset == offset)
10395  {
10396  m_SumFreeSize += lastSuballoc.size;
10397  suballocations1st.pop_back();
10398  CleanupAfterFree();
10399  return;
10400  }
10401  }
10402 
10403  // Item from the middle of 1st vector.
10404  {
10405  VmaSuballocation refSuballoc;
10406  refSuballoc.offset = offset;
10407  // The rest of the members stay intentionally uninitialized for better performance.
10408  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10409  suballocations1st.begin() + m_1stNullItemsBeginCount,
10410  suballocations1st.end(),
10411  refSuballoc);
10412  if(it != suballocations1st.end())
10413  {
10414  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10415  it->hAllocation = VK_NULL_HANDLE;
10416  ++m_1stNullItemsMiddleCount;
10417  m_SumFreeSize += it->size;
10418  CleanupAfterFree();
10419  return;
10420  }
10421  }
10422 
10423  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10424  {
10425  // Item from the middle of 2nd vector.
10426  VmaSuballocation refSuballoc;
10427  refSuballoc.offset = offset;
10428  // The rest of the members stay intentionally uninitialized for better performance.
10429  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10430  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10431  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10432  if(it != suballocations2nd.end())
10433  {
10434  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10435  it->hAllocation = VK_NULL_HANDLE;
10436  ++m_2ndNullItemsCount;
10437  m_SumFreeSize += it->size;
10438  CleanupAfterFree();
10439  return;
10440  }
10441  }
10442 
10443  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10444 }
10445 
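// Compaction heuristic: compacting the 1st vector pays off once null (freed)
// items outnumber live items by a factor of 1.5, and only when there are more
// than 32 suballocations. E.g. with 100 suballocations of which 60 are null:
// 60 * 2 = 120 >= (100 - 60) * 3 = 120, so ShouldCompact1st() returns true.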
10446 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10447 {
10448  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10449  const size_t suballocCount = AccessSuballocations1st().size();
10450  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10451 }
10452 
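// CleanupAfterFree() restores the invariants after any free: it trims null
// items from both ends of both vectors, compacts the 1st vector when
// ShouldCompact1st() allows it, and if the 1st vector becomes empty while the
// 2nd (ring-buffer) vector is not, it swaps the roles of the two vectors.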
10453 void VmaBlockMetadata_Linear::CleanupAfterFree()
10454 {
10455  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10456  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10457 
10458  if(IsEmpty())
10459  {
10460  suballocations1st.clear();
10461  suballocations2nd.clear();
10462  m_1stNullItemsBeginCount = 0;
10463  m_1stNullItemsMiddleCount = 0;
10464  m_2ndNullItemsCount = 0;
10465  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10466  }
10467  else
10468  {
10469  const size_t suballoc1stCount = suballocations1st.size();
10470  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10471  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10472 
10473  // Find more null items at the beginning of 1st vector.
10474  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10475  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10476  {
10477  ++m_1stNullItemsBeginCount;
10478  --m_1stNullItemsMiddleCount;
10479  }
10480 
10481  // Find more null items at the end of 1st vector.
10482  while(m_1stNullItemsMiddleCount > 0 &&
10483  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10484  {
10485  --m_1stNullItemsMiddleCount;
10486  suballocations1st.pop_back();
10487  }
10488 
10489  // Find more null items at the end of 2nd vector.
10490  while(m_2ndNullItemsCount > 0 &&
10491  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10492  {
10493  --m_2ndNullItemsCount;
10494  suballocations2nd.pop_back();
10495  }
10496 
10497  // Find more null items at the beginning of 2nd vector.
10498  while(m_2ndNullItemsCount > 0 &&
10499  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10500  {
10501  --m_2ndNullItemsCount;
10502  suballocations2nd.remove(0);
10503  }
10504 
10505  if(ShouldCompact1st())
10506  {
10507  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10508  size_t srcIndex = m_1stNullItemsBeginCount;
10509  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10510  {
10511  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10512  {
10513  ++srcIndex;
10514  }
10515  if(dstIndex != srcIndex)
10516  {
10517  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10518  }
10519  ++srcIndex;
10520  }
10521  suballocations1st.resize(nonNullItemCount);
10522  m_1stNullItemsBeginCount = 0;
10523  m_1stNullItemsMiddleCount = 0;
10524  }
10525 
10526  // 2nd vector became empty.
10527  if(suballocations2nd.empty())
10528  {
10529  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10530  }
10531 
10532  // 1st vector became empty.
10533  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10534  {
10535  suballocations1st.clear();
10536  m_1stNullItemsBeginCount = 0;
10537 
10538  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10539  {
10540  // Swap 1st with 2nd. Now 2nd is empty.
10541  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10542  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10543  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10544  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10545  {
10546  ++m_1stNullItemsBeginCount;
10547  --m_1stNullItemsMiddleCount;
10548  }
10549  m_2ndNullItemsCount = 0;
10550  m_1stVectorIndex ^= 1;
10551  }
10552  }
10553  }
10554 
10555  VMA_HEAVY_ASSERT(Validate());
10556 }
10557 
10558 
10559 ////////////////////////////////////////////////////////////////////////////////
10560 // class VmaBlockMetadata_Buddy
10561 
10562 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10563  VmaBlockMetadata(hAllocator),
10564  m_Root(VMA_NULL),
10565  m_AllocationCount(0),
10566  m_FreeCount(1),
10567  m_SumFreeSize(0)
10568 {
10569  memset(m_FreeList, 0, sizeof(m_FreeList));
10570 }
10571 
10572 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10573 {
10574  DeleteNode(m_Root);
10575 }
10576 
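// The buddy algorithm can only manage a power-of-two region, so Init() rounds
// the block size down to m_UsableSize = VmaPrevPow2(size). The remaining tail
// is unusable and is reported as an unused range in statistics.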
10577 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10578 {
10579  VmaBlockMetadata::Init(size);
10580 
10581  m_UsableSize = VmaPrevPow2(size);
10582  m_SumFreeSize = m_UsableSize;
10583 
10584  // Calculate m_LevelCount.
10585  m_LevelCount = 1;
10586  while(m_LevelCount < MAX_LEVELS &&
10587  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10588  {
10589  ++m_LevelCount;
10590  }
10591 
10592  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10593  rootNode->offset = 0;
10594  rootNode->type = Node::TYPE_FREE;
10595  rootNode->parent = VMA_NULL;
10596  rootNode->buddy = VMA_NULL;
10597 
10598  m_Root = rootNode;
10599  AddToFreeListFront(0, rootNode);
10600 }
10601 
10602 bool VmaBlockMetadata_Buddy::Validate() const
10603 {
10604  // Validate tree.
10605  ValidationContext ctx;
10606  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10607  {
10608  VMA_VALIDATE(false && "ValidateNode failed.");
10609  }
10610  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10611  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10612 
10613  // Validate free node lists.
10614  for(uint32_t level = 0; level < m_LevelCount; ++level)
10615  {
10616  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10617  m_FreeList[level].front->free.prev == VMA_NULL);
10618 
10619  for(Node* node = m_FreeList[level].front;
10620  node != VMA_NULL;
10621  node = node->free.next)
10622  {
10623  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10624 
10625  if(node->free.next == VMA_NULL)
10626  {
10627  VMA_VALIDATE(m_FreeList[level].back == node);
10628  }
10629  else
10630  {
10631  VMA_VALIDATE(node->free.next->free.prev == node);
10632  }
10633  }
10634  }
10635 
10636  // Validate that free lists at higher levels are empty.
10637  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10638  {
10639  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10640  }
10641 
10642  return true;
10643 }
10644 
10645 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10646 {
10647  for(uint32_t level = 0; level < m_LevelCount; ++level)
10648  {
10649  if(m_FreeList[level].front != VMA_NULL)
10650  {
10651  return LevelToNodeSize(level);
10652  }
10653  }
10654  return 0;
10655 }
10656 
10657 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10658 {
10659  const VkDeviceSize unusableSize = GetUnusableSize();
10660 
10661  outInfo.blockCount = 1;
10662 
10663  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10664  outInfo.usedBytes = outInfo.unusedBytes = 0;
10665 
10666  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10667  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10668  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10669 
10670  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10671 
10672  if(unusableSize > 0)
10673  {
10674  ++outInfo.unusedRangeCount;
10675  outInfo.unusedBytes += unusableSize;
10676  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10677  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10678  }
10679 }
10680 
10681 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10682 {
10683  const VkDeviceSize unusableSize = GetUnusableSize();
10684 
10685  inoutStats.size += GetSize();
10686  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10687  inoutStats.allocationCount += m_AllocationCount;
10688  inoutStats.unusedRangeCount += m_FreeCount;
10689  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10690 
10691  if(unusableSize > 0)
10692  {
10693  ++inoutStats.unusedRangeCount;
10694  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10695  }
10696 }
10697 
10698 #if VMA_STATS_STRING_ENABLED
10699 
10700 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10701 {
10702  // TODO optimize
10703  VmaStatInfo stat;
10704  CalcAllocationStatInfo(stat);
10705 
10706  PrintDetailedMap_Begin(
10707  json,
10708  stat.unusedBytes,
10709  stat.allocationCount,
10710  stat.unusedRangeCount);
10711 
10712  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10713 
10714  const VkDeviceSize unusableSize = GetUnusableSize();
10715  if(unusableSize > 0)
10716  {
10717  PrintDetailedMap_UnusedRange(json,
10718  m_UsableSize, // offset
10719  unusableSize); // size
10720  }
10721 
10722  PrintDetailedMap_End(json);
10723 }
10724 
10725 #endif // #if VMA_STATS_STRING_ENABLED
10726 
10727 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10728  uint32_t currentFrameIndex,
10729  uint32_t frameInUseCount,
10730  VkDeviceSize bufferImageGranularity,
10731  VkDeviceSize allocSize,
10732  VkDeviceSize allocAlignment,
10733  bool upperAddress,
10734  VmaSuballocationType allocType,
10735  bool canMakeOtherLost,
10736  uint32_t strategy,
10737  VmaAllocationRequest* pAllocationRequest)
10738 {
10739  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10740 
10741  // Simple way to respect bufferImageGranularity. May be optimized some day.
10742  // Pad whenever the allocation might be an image with OPTIMAL tiling:
10743  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10744  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10745  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10746  {
10747  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10748  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10749  }
10750 
10751  if(allocSize > m_UsableSize)
10752  {
10753  return false;
10754  }
10755 
10756  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10757  for(uint32_t level = targetLevel + 1; level--; )
10758  {
10759  for(Node* freeNode = m_FreeList[level].front;
10760  freeNode != VMA_NULL;
10761  freeNode = freeNode->free.next)
10762  {
10763  if(freeNode->offset % allocAlignment == 0)
10764  {
10765  pAllocationRequest->type = VmaAllocationRequestType::Normal;
10766  pAllocationRequest->offset = freeNode->offset;
10767  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10768  pAllocationRequest->sumItemSize = 0;
10769  pAllocationRequest->itemsToMakeLostCount = 0;
10770  pAllocationRequest->customData = (void*)(uintptr_t)level;
10771  return true;
10772  }
10773  }
10774  }
10775 
10776  return false;
10777 }
10778 
10779 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10780  uint32_t currentFrameIndex,
10781  uint32_t frameInUseCount,
10782  VmaAllocationRequest* pAllocationRequest)
10783 {
10784  /*
10785  Lost allocations are not supported in buddy allocator at the moment.
10786  Support might be added in the future.
10787  */
10788  return pAllocationRequest->itemsToMakeLostCount == 0;
10789 }
10790 
10791 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10792 {
10793  /*
10794  Lost allocations are not supported in buddy allocator at the moment.
10795  Support might be added in the future.
10796  */
10797  return 0;
10798 }
10799 
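// Alloc() takes the free node found at the level stored in request.customData
// and splits it in half repeatedly until reaching the level that matches
// allocSize. E.g. carving a 40-byte request out of a free 256-byte node
// creates free 128- and 64-byte buddies on the way down and allocates a
// 64-byte node.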
10800 void VmaBlockMetadata_Buddy::Alloc(
10801  const VmaAllocationRequest& request,
10802  VmaSuballocationType type,
10803  VkDeviceSize allocSize,
10804  VmaAllocation hAllocation)
10805 {
10806  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
10807 
10808  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10809  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10810 
10811  Node* currNode = m_FreeList[currLevel].front;
10812  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10813  while(currNode->offset != request.offset)
10814  {
10815  currNode = currNode->free.next;
10816  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10817  }
10818 
10819  // Go down, splitting free nodes.
10820  while(currLevel < targetLevel)
10821  {
10822  // currNode is already first free node at currLevel.
10823  // Remove it from list of free nodes at this currLevel.
10824  RemoveFromFreeList(currLevel, currNode);
10825 
10826  const uint32_t childrenLevel = currLevel + 1;
10827 
10828  // Create two free sub-nodes.
10829  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10830  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10831 
10832  leftChild->offset = currNode->offset;
10833  leftChild->type = Node::TYPE_FREE;
10834  leftChild->parent = currNode;
10835  leftChild->buddy = rightChild;
10836 
10837  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10838  rightChild->type = Node::TYPE_FREE;
10839  rightChild->parent = currNode;
10840  rightChild->buddy = leftChild;
10841 
10842  // Convert current currNode to split type.
10843  currNode->type = Node::TYPE_SPLIT;
10844  currNode->split.leftChild = leftChild;
10845 
10846  // Add child nodes to free list. Order is important!
10847  AddToFreeListFront(childrenLevel, rightChild);
10848  AddToFreeListFront(childrenLevel, leftChild);
10849 
10850  ++m_FreeCount;
10851  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10852  ++currLevel;
10853  currNode = m_FreeList[currLevel].front;
10854 
10855  /*
10856  We can be sure that currNode, as left child of node previously split,
also fulfills the alignment requirement.
10858  */
10859  }
10860 
10861  // Remove from free list.
10862  VMA_ASSERT(currLevel == targetLevel &&
10863  currNode != VMA_NULL &&
10864  currNode->type == Node::TYPE_FREE);
10865  RemoveFromFreeList(currLevel, currNode);
10866 
10867  // Convert to allocation node.
10868  currNode->type = Node::TYPE_ALLOCATION;
10869  currNode->allocation.alloc = hAllocation;
10870 
10871  ++m_AllocationCount;
10872  --m_FreeCount;
10873  m_SumFreeSize -= allocSize;
10874 }
10875 
10876 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10877 {
10878  if(node->type == Node::TYPE_SPLIT)
10879  {
10880  DeleteNode(node->split.leftChild->buddy);
10881  DeleteNode(node->split.leftChild);
10882  }
10883 
10884  vma_delete(GetAllocationCallbacks(), node);
10885 }
10886 
10887 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10888 {
10889  VMA_VALIDATE(level < m_LevelCount);
10890  VMA_VALIDATE(curr->parent == parent);
10891  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10892  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10893  switch(curr->type)
10894  {
10895  case Node::TYPE_FREE:
10896  // curr->free.prev, next are validated separately.
10897  ctx.calculatedSumFreeSize += levelNodeSize;
10898  ++ctx.calculatedFreeCount;
10899  break;
10900  case Node::TYPE_ALLOCATION:
10901  ++ctx.calculatedAllocationCount;
10902  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10903  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10904  break;
10905  case Node::TYPE_SPLIT:
10906  {
10907  const uint32_t childrenLevel = level + 1;
10908  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10909  const Node* const leftChild = curr->split.leftChild;
10910  VMA_VALIDATE(leftChild != VMA_NULL);
10911  VMA_VALIDATE(leftChild->offset == curr->offset);
10912  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10913  {
10914  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10915  }
10916  const Node* const rightChild = leftChild->buddy;
10917  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10918  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10919  {
10920  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10921  }
10922  }
10923  break;
10924  default:
10925  return false;
10926  }
10927 
10928  return true;
10929 }
10930 
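// AllocSizeToLevel() returns the deepest level whose node size still fits the
// allocation. E.g. with m_UsableSize = 256, allocSize = 40 walks 256 -> 128
// -> 64 (64 >= 40 but 32 < 40), so the result is level 2 with node size 64.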
10931 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10932 {
10933  // I know this could be optimized somehow, e.g. by using std::log2p1 from C++20.
10934  uint32_t level = 0;
10935  VkDeviceSize currLevelNodeSize = m_UsableSize;
10936  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10937  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10938  {
10939  ++level;
10940  currLevelNodeSize = nextLevelNodeSize;
10941  nextLevelNodeSize = currLevelNodeSize >> 1;
10942  }
10943  return level;
10944 }
10945 
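// FreeAtOffset() descends from the root, choosing the left or right child by
// offset, until it reaches the allocation node. It then merges the freed node
// with its buddy repeatedly while the buddy is also free, undoing the splits
// performed in Alloc().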
10946 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10947 {
10948  // Find node and level.
10949  Node* node = m_Root;
10950  VkDeviceSize nodeOffset = 0;
10951  uint32_t level = 0;
10952  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10953  while(node->type == Node::TYPE_SPLIT)
10954  {
10955  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10956  if(offset < nodeOffset + nextLevelSize)
10957  {
10958  node = node->split.leftChild;
10959  }
10960  else
10961  {
10962  node = node->split.leftChild->buddy;
10963  nodeOffset += nextLevelSize;
10964  }
10965  ++level;
10966  levelNodeSize = nextLevelSize;
10967  }
10968 
10969  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10970  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10971 
10972  ++m_FreeCount;
10973  --m_AllocationCount;
10974  m_SumFreeSize += alloc->GetSize();
10975 
10976  node->type = Node::TYPE_FREE;
10977 
10978  // Join free nodes if possible.
10979  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10980  {
10981  RemoveFromFreeList(level, node->buddy);
10982  Node* const parent = node->parent;
10983 
10984  vma_delete(GetAllocationCallbacks(), node->buddy);
10985  vma_delete(GetAllocationCallbacks(), node);
10986  parent->type = Node::TYPE_FREE;
10987 
10988  node = parent;
10989  --level;
10990  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10991  --m_FreeCount;
10992  }
10993 
10994  AddToFreeListFront(level, node);
10995 }
10996 
10997 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10998 {
10999  switch(node->type)
11000  {
11001  case Node::TYPE_FREE:
11002  ++outInfo.unusedRangeCount;
11003  outInfo.unusedBytes += levelNodeSize;
11004  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11005  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11006  break;
11007  case Node::TYPE_ALLOCATION:
11008  {
11009  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11010  ++outInfo.allocationCount;
11011  outInfo.usedBytes += allocSize;
11012  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11013  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11014 
11015  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11016  if(unusedRangeSize > 0)
11017  {
11018  ++outInfo.unusedRangeCount;
11019  outInfo.unusedBytes += unusedRangeSize;
11020  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11021  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11022  }
11023  }
11024  break;
11025  case Node::TYPE_SPLIT:
11026  {
11027  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11028  const Node* const leftChild = node->split.leftChild;
11029  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11030  const Node* const rightChild = leftChild->buddy;
11031  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11032  }
11033  break;
11034  default:
11035  VMA_ASSERT(0);
11036  }
11037 }
11038 
11039 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11040 {
11041  VMA_ASSERT(node->type == Node::TYPE_FREE);
11042 
11043  // List is empty.
11044  Node* const frontNode = m_FreeList[level].front;
11045  if(frontNode == VMA_NULL)
11046  {
11047  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11048  node->free.prev = node->free.next = VMA_NULL;
11049  m_FreeList[level].front = m_FreeList[level].back = node;
11050  }
11051  else
11052  {
11053  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11054  node->free.prev = VMA_NULL;
11055  node->free.next = frontNode;
11056  frontNode->free.prev = node;
11057  m_FreeList[level].front = node;
11058  }
11059 }
11060 
11061 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11062 {
11063  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11064 
11065  // It is at the front.
11066  if(node->free.prev == VMA_NULL)
11067  {
11068  VMA_ASSERT(m_FreeList[level].front == node);
11069  m_FreeList[level].front = node->free.next;
11070  }
11071  else
11072  {
11073  Node* const prevFreeNode = node->free.prev;
11074  VMA_ASSERT(prevFreeNode->free.next == node);
11075  prevFreeNode->free.next = node->free.next;
11076  }
11077 
11078  // It is at the back.
11079  if(node->free.next == VMA_NULL)
11080  {
11081  VMA_ASSERT(m_FreeList[level].back == node);
11082  m_FreeList[level].back = node->free.prev;
11083  }
11084  else
11085  {
11086  Node* const nextFreeNode = node->free.next;
11087  VMA_ASSERT(nextFreeNode->free.prev == node);
11088  nextFreeNode->free.prev = node->free.prev;
11089  }
11090 }
11091 
11092 #if VMA_STATS_STRING_ENABLED
11093 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11094 {
11095  switch(node->type)
11096  {
11097  case Node::TYPE_FREE:
11098  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11099  break;
11100  case Node::TYPE_ALLOCATION:
11101  {
11102  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11103  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11104  if(allocSize < levelNodeSize)
11105  {
11106  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11107  }
11108  }
11109  break;
11110  case Node::TYPE_SPLIT:
11111  {
11112  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11113  const Node* const leftChild = node->split.leftChild;
11114  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11115  const Node* const rightChild = leftChild->buddy;
11116  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11117  }
11118  break;
11119  default:
11120  VMA_ASSERT(0);
11121  }
11122 }
11123 #endif // #if VMA_STATS_STRING_ENABLED
11124 
11125 
11126 ////////////////////////////////////////////////////////////////////////////////
11127 // class VmaDeviceMemoryBlock
11128 
11129 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11130  m_pMetadata(VMA_NULL),
11131  m_MemoryTypeIndex(UINT32_MAX),
11132  m_Id(0),
11133  m_hMemory(VK_NULL_HANDLE),
11134  m_MapCount(0),
11135  m_pMappedData(VMA_NULL)
11136 {
11137 }
11138 
11139 void VmaDeviceMemoryBlock::Init(
11140  VmaAllocator hAllocator,
11141  VmaPool hParentPool,
11142  uint32_t newMemoryTypeIndex,
11143  VkDeviceMemory newMemory,
11144  VkDeviceSize newSize,
11145  uint32_t id,
11146  uint32_t algorithm)
11147 {
11148  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11149 
11150  m_hParentPool = hParentPool;
11151  m_MemoryTypeIndex = newMemoryTypeIndex;
11152  m_Id = id;
11153  m_hMemory = newMemory;
11154 
11155  switch(algorithm)
11156  {
11157  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11158  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11159  break;
11160  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11161  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11162  break;
11163  default:
11164  VMA_ASSERT(0);
11165  // Fall-through.
11166  case 0:
11167  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11168  }
11169  m_pMetadata->Init(newSize);
11170 }
11171 
11172 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11173 {
11174  // This is the most important assert in the entire library.
11175  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11176  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11177 
11178  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11179  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11180  m_hMemory = VK_NULL_HANDLE;
11181 
11182  vma_delete(allocator, m_pMetadata);
11183  m_pMetadata = VMA_NULL;
11184 }
11185 
11186 bool VmaDeviceMemoryBlock::Validate() const
11187 {
11188  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11189  (m_pMetadata->GetSize() != 0));
11190 
11191  return m_pMetadata->Validate();
11192 }
11193 
11194 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11195 {
11196  void* pData = nullptr;
11197  VkResult res = Map(hAllocator, 1, &pData);
11198  if(res != VK_SUCCESS)
11199  {
11200  return res;
11201  }
11202 
11203  res = m_pMetadata->CheckCorruption(pData);
11204 
11205  Unmap(hAllocator, 1);
11206 
11207  return res;
11208 }
11209 
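// Mapping is reference-counted per block: the first Map() call invokes
// vkMapMemory on the whole VkDeviceMemory, and later calls only increment
// m_MapCount, so many persistently mapped allocations can share one mapping.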
11210 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11211 {
11212  if(count == 0)
11213  {
11214  return VK_SUCCESS;
11215  }
11216 
11217  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11218  if(m_MapCount != 0)
11219  {
11220  m_MapCount += count;
11221  VMA_ASSERT(m_pMappedData != VMA_NULL);
11222  if(ppData != VMA_NULL)
11223  {
11224  *ppData = m_pMappedData;
11225  }
11226  return VK_SUCCESS;
11227  }
11228  else
11229  {
11230  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11231  hAllocator->m_hDevice,
11232  m_hMemory,
11233  0, // offset
11234  VK_WHOLE_SIZE,
11235  0, // flags
11236  &m_pMappedData);
11237  if(result == VK_SUCCESS)
11238  {
11239  if(ppData != VMA_NULL)
11240  {
11241  *ppData = m_pMappedData;
11242  }
11243  m_MapCount = count;
11244  }
11245  return result;
11246  }
11247 }
11248 
11249 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11250 {
11251  if(count == 0)
11252  {
11253  return;
11254  }
11255 
11256  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11257  if(m_MapCount >= count)
11258  {
11259  m_MapCount -= count;
11260  if(m_MapCount == 0)
11261  {
11262  m_pMappedData = VMA_NULL;
11263  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11264  }
11265  }
11266  else
11267  {
11268  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11269  }
11270 }
11271 
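// With VMA_DEBUG_MARGIN > 0 and VMA_DEBUG_DETECT_CORRUPTION enabled, the
// margins just before and just after each allocation are filled with a 32-bit
// magic pattern. ValidateMagicValueAroundAllocation() and CheckCorruption()
// re-validate that pattern to detect out-of-bounds writes around allocations.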
11272 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11273 {
11274  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11275  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11276 
11277  void* pData;
11278  VkResult res = Map(hAllocator, 1, &pData);
11279  if(res != VK_SUCCESS)
11280  {
11281  return res;
11282  }
11283 
11284  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11285  VmaWriteMagicValue(pData, allocOffset + allocSize);
11286 
11287  Unmap(hAllocator, 1);
11288 
11289  return VK_SUCCESS;
11290 }
11291 
11292 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11293 {
11294  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11295  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11296 
11297  void* pData;
11298  VkResult res = Map(hAllocator, 1, &pData);
11299  if(res != VK_SUCCESS)
11300  {
11301  return res;
11302  }
11303 
11304  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11305  {
11306  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11307  }
11308  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11309  {
11310  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11311  }
11312 
11313  Unmap(hAllocator, 1);
11314 
11315  return VK_SUCCESS;
11316 }
11317 
11318 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11319  const VmaAllocator hAllocator,
11320  const VmaAllocation hAllocation,
11321  VkBuffer hBuffer)
11322 {
11323  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11324  hAllocation->GetBlock() == this);
11325  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11326  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11327  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11328  hAllocator->m_hDevice,
11329  hBuffer,
11330  m_hMemory,
11331  hAllocation->GetOffset());
11332 }
11333 
11334 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11335  const VmaAllocator hAllocator,
11336  const VmaAllocation hAllocation,
11337  VkImage hImage)
11338 {
11339  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11340  hAllocation->GetBlock() == this);
11341  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11342  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11343  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11344  hAllocator->m_hDevice,
11345  hImage,
11346  m_hMemory,
11347  hAllocation->GetOffset());
11348 }
11349 
11350 static void InitStatInfo(VmaStatInfo& outInfo)
11351 {
11352  memset(&outInfo, 0, sizeof(outInfo));
11353  outInfo.allocationSizeMin = UINT64_MAX;
11354  outInfo.unusedRangeSizeMin = UINT64_MAX;
11355 }
11356 
11357 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11358 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11359 {
11360  inoutInfo.blockCount += srcInfo.blockCount;
11361  inoutInfo.allocationCount += srcInfo.allocationCount;
11362  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11363  inoutInfo.usedBytes += srcInfo.usedBytes;
11364  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11365  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11366  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11367  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11368  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11369 }
11370 
11371 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11372 {
11373  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11374  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11375  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11376  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11377 }
11378 
11379 VmaPool_T::VmaPool_T(
11380  VmaAllocator hAllocator,
11381  const VmaPoolCreateInfo& createInfo,
11382  VkDeviceSize preferredBlockSize) :
11383  m_BlockVector(
11384  hAllocator,
11385  this, // hParentPool
11386  createInfo.memoryTypeIndex,
11387  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11388  createInfo.minBlockCount,
11389  createInfo.maxBlockCount,
11390  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11391  createInfo.frameInUseCount,
11392  true, // isCustomPool
11393  createInfo.blockSize != 0, // explicitBlockSize
11394  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11395  m_Id(0)
11396 {
11397 }
11398 
11399 VmaPool_T::~VmaPool_T()
11400 {
11401 }
11402 
11403 #if VMA_STATS_STRING_ENABLED
11404 
11405 #endif // #if VMA_STATS_STRING_ENABLED
11406 
11407 VmaBlockVector::VmaBlockVector(
11408  VmaAllocator hAllocator,
11409  VmaPool hParentPool,
11410  uint32_t memoryTypeIndex,
11411  VkDeviceSize preferredBlockSize,
11412  size_t minBlockCount,
11413  size_t maxBlockCount,
11414  VkDeviceSize bufferImageGranularity,
11415  uint32_t frameInUseCount,
11416  bool isCustomPool,
11417  bool explicitBlockSize,
11418  uint32_t algorithm) :
11419  m_hAllocator(hAllocator),
11420  m_hParentPool(hParentPool),
11421  m_MemoryTypeIndex(memoryTypeIndex),
11422  m_PreferredBlockSize(preferredBlockSize),
11423  m_MinBlockCount(minBlockCount),
11424  m_MaxBlockCount(maxBlockCount),
11425  m_BufferImageGranularity(bufferImageGranularity),
11426  m_FrameInUseCount(frameInUseCount),
11427  m_IsCustomPool(isCustomPool),
11428  m_ExplicitBlockSize(explicitBlockSize),
11429  m_Algorithm(algorithm),
11430  m_HasEmptyBlock(false),
11431  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11432  m_NextBlockId(0)
11433 {
11434 }
11435 
11436 VmaBlockVector::~VmaBlockVector()
11437 {
11438  for(size_t i = m_Blocks.size(); i--; )
11439  {
11440  m_Blocks[i]->Destroy(m_hAllocator);
11441  vma_delete(m_hAllocator, m_Blocks[i]);
11442  }
11443 }
11444 
11445 VkResult VmaBlockVector::CreateMinBlocks()
11446 {
11447  for(size_t i = 0; i < m_MinBlockCount; ++i)
11448  {
11449  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11450  if(res != VK_SUCCESS)
11451  {
11452  return res;
11453  }
11454  }
11455  return VK_SUCCESS;
11456 }
11457 
11458 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11459 {
11460  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11461 
11462  const size_t blockCount = m_Blocks.size();
11463 
11464  pStats->size = 0;
11465  pStats->unusedSize = 0;
11466  pStats->allocationCount = 0;
11467  pStats->unusedRangeCount = 0;
11468  pStats->unusedRangeSizeMax = 0;
11469  pStats->blockCount = blockCount;
11470 
11471  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11472  {
11473  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11474  VMA_ASSERT(pBlock);
11475  VMA_HEAVY_ASSERT(pBlock->Validate());
11476  pBlock->m_pMetadata->AddPoolStats(*pStats);
11477  }
11478 }
11479 
11480 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11481 {
11482  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11483  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11484  (VMA_DEBUG_MARGIN > 0) &&
11485  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11486  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11487 }
11488 
11489 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11490 
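// Allocate() creates allocationCount pages under a single write lock. If any
// page fails, all pages allocated so far are freed again, so the call behaves
// atomically: either every requested allocation succeeds or none remains.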
11491 VkResult VmaBlockVector::Allocate(
11492  uint32_t currentFrameIndex,
11493  VkDeviceSize size,
11494  VkDeviceSize alignment,
11495  const VmaAllocationCreateInfo& createInfo,
11496  VmaSuballocationType suballocType,
11497  size_t allocationCount,
11498  VmaAllocation* pAllocations)
11499 {
11500  size_t allocIndex;
11501  VkResult res = VK_SUCCESS;
11502 
11503  if(IsCorruptionDetectionEnabled())
11504  {
11505  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11506  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11507  }
11508 
11509  {
11510  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11511  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11512  {
11513  res = AllocatePage(
11514  currentFrameIndex,
11515  size,
11516  alignment,
11517  createInfo,
11518  suballocType,
11519  pAllocations + allocIndex);
11520  if(res != VK_SUCCESS)
11521  {
11522  break;
11523  }
11524  }
11525  }
11526 
11527  if(res != VK_SUCCESS)
11528  {
11529  // Free all already created allocations.
11530  while(allocIndex--)
11531  {
11532  Free(pAllocations[allocIndex]);
11533  }
11534  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11535  }
11536 
11537  return res;
11538 }
11539 
11540 VkResult VmaBlockVector::AllocatePage(
11541  uint32_t currentFrameIndex,
11542  VkDeviceSize size,
11543  VkDeviceSize alignment,
11544  const VmaAllocationCreateInfo& createInfo,
11545  VmaSuballocationType suballocType,
11546  VmaAllocation* pAllocation)
11547 {
11548  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11549  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11550  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11551  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11552  const bool canCreateNewBlock =
11553  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11554  (m_Blocks.size() < m_MaxBlockCount);
11555  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11556 
11557  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11558  // That in turn is available only when maxBlockCount = 1.
11559  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11560  {
11561  canMakeOtherLost = false;
11562  }
11563 
11564  // Upper address can only be used with linear allocator and within a single memory block.
11565  if(isUpperAddress &&
11566  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11567  {
11568  return VK_ERROR_FEATURE_NOT_PRESENT;
11569  }
11570 
11571  // Validate strategy.
11572  switch(strategy)
11573  {
11574  case 0:
11575  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11576  break;
11577  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11578  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11579  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11580  break;
11581  default:
11582  return VK_ERROR_FEATURE_NOT_PRESENT;
11583  }
11584 
11585  // Early reject: requested allocation size is larger than maximum block size for this block vector.
11586  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11587  {
11588  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11589  }
11590 
11591  /*
11592  Under certain conditions, this whole section can be skipped for optimization, so
11593  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11594  e.g. for custom pools with linear algorithm.
11595  */
11596  if(!canMakeOtherLost || canCreateNewBlock)
11597  {
11598  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11599  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11600  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11601 
11602  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11603  {
11604  // Use only last block.
11605  if(!m_Blocks.empty())
11606  {
11607  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11608  VMA_ASSERT(pCurrBlock);
11609  VkResult res = AllocateFromBlock(
11610  pCurrBlock,
11611  currentFrameIndex,
11612  size,
11613  alignment,
11614  allocFlagsCopy,
11615  createInfo.pUserData,
11616  suballocType,
11617  strategy,
11618  pAllocation);
11619  if(res == VK_SUCCESS)
11620  {
11621  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11622  return VK_SUCCESS;
11623  }
11624  }
11625  }
11626  else
11627  {
11628  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11629  {
11630  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11631  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11632  {
11633  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11634  VMA_ASSERT(pCurrBlock);
11635  VkResult res = AllocateFromBlock(
11636  pCurrBlock,
11637  currentFrameIndex,
11638  size,
11639  alignment,
11640  allocFlagsCopy,
11641  createInfo.pUserData,
11642  suballocType,
11643  strategy,
11644  pAllocation);
11645  if(res == VK_SUCCESS)
11646  {
11647  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11648  return VK_SUCCESS;
11649  }
11650  }
11651  }
11652  else // WORST_FIT, FIRST_FIT
11653  {
11654  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11655  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11656  {
11657  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11658  VMA_ASSERT(pCurrBlock);
11659  VkResult res = AllocateFromBlock(
11660  pCurrBlock,
11661  currentFrameIndex,
11662  size,
11663  alignment,
11664  allocFlagsCopy,
11665  createInfo.pUserData,
11666  suballocType,
11667  strategy,
11668  pAllocation);
11669  if(res == VK_SUCCESS)
11670  {
11671  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11672  return VK_SUCCESS;
11673  }
11674  }
11675  }
11676  }
11677 
11678  // 2. Try to create new block.
11679  if(canCreateNewBlock)
11680  {
11681  // Calculate optimal size for new block.
11682  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11683  uint32_t newBlockSizeShift = 0;
11684  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11685 
11686  if(!m_ExplicitBlockSize)
11687  {
11688  // Allocate 1/8, 1/4, 1/2 as first blocks.
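  // Example (illustrative): with m_PreferredBlockSize = 256 MiB and small
  // allocations, the first block is created with 32 MiB, and subsequent new
  // blocks grow through 64 and 128 MiB up to the full 256 MiB as the pool fills.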
11689  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11690  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11691  {
11692  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11693  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11694  {
11695  newBlockSize = smallerNewBlockSize;
11696  ++newBlockSizeShift;
11697  }
11698  else
11699  {
11700  break;
11701  }
11702  }
11703  }
11704 
11705  size_t newBlockIndex = 0;
11706  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11707  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11708  if(!m_ExplicitBlockSize)
11709  {
11710  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11711  {
11712  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11713  if(smallerNewBlockSize >= size)
11714  {
11715  newBlockSize = smallerNewBlockSize;
11716  ++newBlockSizeShift;
11717  res = CreateBlock(newBlockSize, &newBlockIndex);
11718  }
11719  else
11720  {
11721  break;
11722  }
11723  }
11724  }
11725 
11726  if(res == VK_SUCCESS)
11727  {
11728  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11729  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11730 
11731  res = AllocateFromBlock(
11732  pBlock,
11733  currentFrameIndex,
11734  size,
11735  alignment,
11736  allocFlagsCopy,
11737  createInfo.pUserData,
11738  suballocType,
11739  strategy,
11740  pAllocation);
11741  if(res == VK_SUCCESS)
11742  {
11743  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11744  return VK_SUCCESS;
11745  }
11746  else
11747  {
11748  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11749  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11750  }
11751  }
11752  }
11753  }
11754 
11755  // 3. Try to allocate from existing blocks with making other allocations lost.
11756  if(canMakeOtherLost)
11757  {
11758  uint32_t tryIndex = 0;
11759  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11760  {
11761  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11762  VmaAllocationRequest bestRequest = {};
11763  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11764 
11765  // 1. Search existing allocations.
11766  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11767  {

11768  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11769  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11770  {
11771  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11772  VMA_ASSERT(pCurrBlock);
11773  VmaAllocationRequest currRequest = {};
11774  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11775  currentFrameIndex,
11776  m_FrameInUseCount,
11777  m_BufferImageGranularity,
11778  size,
11779  alignment,
11780  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11781  suballocType,
11782  canMakeOtherLost,
11783  strategy,
11784  &currRequest))
11785  {
11786  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11787  if(pBestRequestBlock == VMA_NULL ||
11788  currRequestCost < bestRequestCost)
11789  {
11790  pBestRequestBlock = pCurrBlock;
11791  bestRequest = currRequest;
11792  bestRequestCost = currRequestCost;
11793 
11794  if(bestRequestCost == 0)
11795  {
11796  break;
11797  }
11798  }
11799  }
11800  }
11801  }
11802  else // WORST_FIT, FIRST_FIT
11803  {
11804  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11805  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11806  {
11807  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11808  VMA_ASSERT(pCurrBlock);
11809  VmaAllocationRequest currRequest = {};
11810  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11811  currentFrameIndex,
11812  m_FrameInUseCount,
11813  m_BufferImageGranularity,
11814  size,
11815  alignment,
11816  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11817  suballocType,
11818  canMakeOtherLost,
11819  strategy,
11820  &currRequest))
11821  {
11822  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11823  if(pBestRequestBlock == VMA_NULL ||
11824  currRequestCost < bestRequestCost ||
11825  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11826  {
11827  pBestRequestBlock = pCurrBlock;
11828  bestRequest = currRequest;
11829  bestRequestCost = currRequestCost;
11830 
11831  if(bestRequestCost == 0 ||
11832  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11833  {
11834  break;
11835  }
11836  }
11837  }
11838  }
11839  }
11840 
11841  if(pBestRequestBlock != VMA_NULL)
11842  {
11843  if(mapped)
11844  {
11845  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11846  if(res != VK_SUCCESS)
11847  {
11848  return res;
11849  }
11850  }
11851 
11852  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11853  currentFrameIndex,
11854  m_FrameInUseCount,
11855  &bestRequest))
11856  {
11857  // We no longer have an empty Allocation.
11858  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11859  {
11860  m_HasEmptyBlock = false;
11861  }
11862  // Allocate from this pBlock.
11863  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
11864  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
11865  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
11866  (*pAllocation)->InitBlockAllocation(
11867  pBestRequestBlock,
11868  bestRequest.offset,
11869  alignment,
11870  size,
11871  suballocType,
11872  mapped,
11873  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11874  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11875  VMA_DEBUG_LOG(" Returned from existing block");
11876  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11877  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11878  {
11879  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11880  }
11881  if(IsCorruptionDetectionEnabled())
11882  {
11883  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11884  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11885  }
11886  return VK_SUCCESS;
11887  }
11888  // else: Some allocations must have been touched while we are here. Next try.
11889  }
11890  else
11891  {
11892  // Could not find place in any of the blocks - break outer loop.
11893  break;
11894  }
11895  }
11896  /* Maximum number of tries exceeded - a very unlikely event that can occur when
11897  many other threads simultaneously touch allocations, making it impossible to
11898  mark them as lost at the same time as we try to allocate. */
11899  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11900  {
11901  return VK_ERROR_TOO_MANY_OBJECTS;
11902  }
11903  }
11904 
11905  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11906 }
11907 
11908 void VmaBlockVector::Free(
11909  VmaAllocation hAllocation)
11910 {
11911  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11912 
11913  // Scope for lock.
11914  {
11915  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11916 
11917  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11918 
11919  if(IsCorruptionDetectionEnabled())
11920  {
11921  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11922  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11923  }
11924 
11925  if(hAllocation->IsPersistentMap())
11926  {
11927  pBlock->Unmap(m_hAllocator, 1);
11928  }
11929 
11930  pBlock->m_pMetadata->Free(hAllocation);
11931  VMA_HEAVY_ASSERT(pBlock->Validate());
11932 
11933  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11934 
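  // Heuristic: at most one fully empty block is kept alive as a cache, so that
  // frequent free/allocate cycles do not repeatedly call vkFreeMemory/vkAllocateMemory.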
11935  // pBlock became empty after this deallocation.
11936  if(pBlock->m_pMetadata->IsEmpty())
11937  {
11938  // We already have an empty Allocation. We don't want to have two, so delete this one.
11939  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11940  {
11941  pBlockToDelete = pBlock;
11942  Remove(pBlock);
11943  }
11944  // We now have first empty block.
11945  else
11946  {
11947  m_HasEmptyBlock = true;
11948  }
11949  }
11950  // pBlock didn't become empty, but we have another empty block - find and free that one.
11951  // (This is optional, a heuristic.)
11952  else if(m_HasEmptyBlock)
11953  {
11954  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11955  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11956  {
11957  pBlockToDelete = pLastBlock;
11958  m_Blocks.pop_back();
11959  m_HasEmptyBlock = false;
11960  }
11961  }
11962 
11963  IncrementallySortBlocks();
11964  }
11965 
11966  // Destruction of a free Allocation. Deferred until this point, outside of the
11967  // mutex lock, for performance reasons.
11968  if(pBlockToDelete != VMA_NULL)
11969  {
11970  VMA_DEBUG_LOG(" Deleted empty allocation");
11971  pBlockToDelete->Destroy(m_hAllocator);
11972  vma_delete(m_hAllocator, pBlockToDelete);
11973  }
11974 }
11975 
11976 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11977 {
11978  VkDeviceSize result = 0;
11979  for(size_t i = m_Blocks.size(); i--; )
11980  {
11981  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11982  if(result >= m_PreferredBlockSize)
11983  {
11984  break;
11985  }
11986  }
11987  return result;
11988 }
11989 
11990 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11991 {
11992  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11993  {
11994  if(m_Blocks[blockIndex] == pBlock)
11995  {
11996  VmaVectorRemove(m_Blocks, blockIndex);
11997  return;
11998  }
11999  }
12000  VMA_ASSERT(0);
12001 }
12002 
12003 void VmaBlockVector::IncrementallySortBlocks()
12004 {
12005  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12006  {
12007  // Bubble sort only until first swap.
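  // Performing at most one swap per call keeps the cost low, while repeated
  // calls gradually restore ordering by ascending sum of free space, which the
  // forward/backward search order in AllocatePage() relies on.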
12008  for(size_t i = 1; i < m_Blocks.size(); ++i)
12009  {
12010  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12011  {
12012  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12013  return;
12014  }
12015  }
12016  }
12017 }
12018 
12019 VkResult VmaBlockVector::AllocateFromBlock(
12020  VmaDeviceMemoryBlock* pBlock,
12021  uint32_t currentFrameIndex,
12022  VkDeviceSize size,
12023  VkDeviceSize alignment,
12024  VmaAllocationCreateFlags allocFlags,
12025  void* pUserData,
12026  VmaSuballocationType suballocType,
12027  uint32_t strategy,
12028  VmaAllocation* pAllocation)
12029 {
12030  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12031  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12032  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12033  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12034 
12035  VmaAllocationRequest currRequest = {};
12036  if(pBlock->m_pMetadata->CreateAllocationRequest(
12037  currentFrameIndex,
12038  m_FrameInUseCount,
12039  m_BufferImageGranularity,
12040  size,
12041  alignment,
12042  isUpperAddress,
12043  suballocType,
12044  false, // canMakeOtherLost
12045  strategy,
12046  &currRequest))
12047  {
12048  // Allocate from pCurrBlock.
12049  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12050 
12051  if(mapped)
12052  {
12053  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12054  if(res != VK_SUCCESS)
12055  {
12056  return res;
12057  }
12058  }
12059 
12060  // We no longer have an empty Allocation.
12061  if(pBlock->m_pMetadata->IsEmpty())
12062  {
12063  m_HasEmptyBlock = false;
12064  }
12065 
12066  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12067  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12068  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12069  (*pAllocation)->InitBlockAllocation(
12070  pBlock,
12071  currRequest.offset,
12072  alignment,
12073  size,
12074  suballocType,
12075  mapped,
12076  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12077  VMA_HEAVY_ASSERT(pBlock->Validate());
12078  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12079  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12080  {
12081  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12082  }
12083  if(IsCorruptionDetectionEnabled())
12084  {
12085  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12086  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12087  }
12088  return VK_SUCCESS;
12089  }
12090  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12091 }
12092 
12093 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12094 {
12095  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12096  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12097  allocInfo.allocationSize = blockSize;
12098  VkDeviceMemory mem = VK_NULL_HANDLE;
12099  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12100  if(res < 0)
12101  {
12102  return res;
12103  }
12104 
12105  // New VkDeviceMemory successfully created.
12106 
12107  // Create new Allocation for it.
12108  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12109  pBlock->Init(
12110  m_hAllocator,
12111  m_hParentPool,
12112  m_MemoryTypeIndex,
12113  mem,
12114  allocInfo.allocationSize,
12115  m_NextBlockId++,
12116  m_Algorithm);
12117 
12118  m_Blocks.push_back(pBlock);
12119  if(pNewBlockIndex != VMA_NULL)
12120  {
12121  *pNewBlockIndex = m_Blocks.size() - 1;
12122  }
12123 
12124  return VK_SUCCESS;
12125 }
12126 
12127 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12128  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12129  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12130 {
12131  const size_t blockCount = m_Blocks.size();
12132  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12133 
12134  enum BLOCK_FLAG
12135  {
12136  BLOCK_FLAG_USED = 0x00000001,
12137  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12138  };
12139 
12140  struct BlockInfo
12141  {
12142  uint32_t flags;
12143  void* pMappedData;
12144  };
12145  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12146  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12147  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12148 
12149  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12150  const size_t moveCount = moves.size();
12151  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12152  {
12153  const VmaDefragmentationMove& move = moves[moveIndex];
12154  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12155  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12156  }
12157 
12158  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12159 
12160  // Go over all blocks. Get mapped pointer or map if necessary.
12161  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12162  {
12163  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12164  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12165  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12166  {
12167  currBlockInfo.pMappedData = pBlock->GetMappedData();
12168  // It is not originally mapped - map it.
12169  if(currBlockInfo.pMappedData == VMA_NULL)
12170  {
12171  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12172  if(pDefragCtx->res == VK_SUCCESS)
12173  {
12174  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12175  }
12176  }
12177  }
12178  }
12179 
12180  // Go over all moves. Do actual data transfer.
12181  if(pDefragCtx->res == VK_SUCCESS)
12182  {
12183  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12184  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12185 
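  // Note: per the Vulkan spec, offset and size in VkMappedMemoryRange passed to
  // vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges must be aligned to
  // nonCoherentAtomSize or reach the end of the memory object - hence the
  // VmaAlignDown/VmaAlignUp and the clamp against the block size below.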
12186  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12187  {
12188  const VmaDefragmentationMove& move = moves[moveIndex];
12189 
12190  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12191  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12192 
12193  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12194 
12195  // Invalidate source.
12196  if(isNonCoherent)
12197  {
12198  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12199  memRange.memory = pSrcBlock->GetDeviceMemory();
12200  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12201  memRange.size = VMA_MIN(
12202  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12203  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12204  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12205  }
12206 
12207  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12208  memmove(
12209  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12210  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12211  static_cast<size_t>(move.size));
12212 
12213  if(IsCorruptionDetectionEnabled())
12214  {
12215  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12216  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12217  }
12218 
12219  // Flush destination.
12220  if(isNonCoherent)
12221  {
12222  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12223  memRange.memory = pDstBlock->GetDeviceMemory();
12224  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12225  memRange.size = VMA_MIN(
12226  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12227  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12228  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12229  }
12230  }
12231  }
12232 
12233  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12234  // This is done regardless of whether pCtx->res == VK_SUCCESS.
12235  for(size_t blockIndex = blockCount; blockIndex--; )
12236  {
12237  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12238  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12239  {
12240  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12241  pBlock->Unmap(m_hAllocator, 1);
12242  }
12243  }
12244 }
12245 
12246 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12247  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12248  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12249  VkCommandBuffer commandBuffer)
12250 {
12251  const size_t blockCount = m_Blocks.size();
12252 
12253  pDefragCtx->blockContexts.resize(blockCount);
12254  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12255 
12256  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12257  const size_t moveCount = moves.size();
12258  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12259  {
12260  const VmaDefragmentationMove& move = moves[moveIndex];
12261  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12262  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12263  }
12264 
12265  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12266 
12267  // Go over all blocks. Create and bind buffer for whole block if necessary.
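  // Note: the GPU path copies raw memory contents with vkCmdCopyBuffer between
  // temporary buffers bound to the whole source and destination blocks - which
  // is consistent with the documented rule that only allocations bound to
  // buffers, not images, may be defragmented.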
12268  {
12269  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
12270  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
12271  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
12272 
12273  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12274  {
12275  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12276  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12277  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12278  {
12279  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12280  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12281  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12282  if(pDefragCtx->res == VK_SUCCESS)
12283  {
12284  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12285  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12286  }
12287  }
12288  }
12289  }
12290 
12291  // Go over all moves. Post data transfer commands to command buffer.
12292  if(pDefragCtx->res == VK_SUCCESS)
12293  {
12294  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12295  {
12296  const VmaDefragmentationMove& move = moves[moveIndex];
12297 
12298  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12299  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12300 
12301  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12302 
12303  VkBufferCopy region = {
12304  move.srcOffset,
12305  move.dstOffset,
12306  move.size };
12307  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12308  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12309  }
12310  }
12311 
12312  // Buffers stay in the defrag context for later destruction; the recorded copy commands still have to execute, so report VK_NOT_READY.
12313  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12314  {
12315  pDefragCtx->res = VK_NOT_READY;
12316  }
12317 }
12318 
12319 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12320 {
12321  m_HasEmptyBlock = false;
12322  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12323  {
12324  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12325  if(pBlock->m_pMetadata->IsEmpty())
12326  {
12327  if(m_Blocks.size() > m_MinBlockCount)
12328  {
12329  if(pDefragmentationStats != VMA_NULL)
12330  {
12331  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12332  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12333  }
12334 
12335  VmaVectorRemove(m_Blocks, blockIndex);
12336  pBlock->Destroy(m_hAllocator);
12337  vma_delete(m_hAllocator, pBlock);
12338  }
12339  else
12340  {
12341  m_HasEmptyBlock = true;
12342  }
12343  }
12344  }
12345 }
12346 
12347 #if VMA_STATS_STRING_ENABLED
12348 
12349 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12350 {
12351  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12352 
12353  json.BeginObject();
12354 
12355  if(m_IsCustomPool)
12356  {
12357  json.WriteString("MemoryTypeIndex");
12358  json.WriteNumber(m_MemoryTypeIndex);
12359 
12360  json.WriteString("BlockSize");
12361  json.WriteNumber(m_PreferredBlockSize);
12362 
12363  json.WriteString("BlockCount");
12364  json.BeginObject(true);
12365  if(m_MinBlockCount > 0)
12366  {
12367  json.WriteString("Min");
12368  json.WriteNumber((uint64_t)m_MinBlockCount);
12369  }
12370  if(m_MaxBlockCount < SIZE_MAX)
12371  {
12372  json.WriteString("Max");
12373  json.WriteNumber((uint64_t)m_MaxBlockCount);
12374  }
12375  json.WriteString("Cur");
12376  json.WriteNumber((uint64_t)m_Blocks.size());
12377  json.EndObject();
12378 
12379  if(m_FrameInUseCount > 0)
12380  {
12381  json.WriteString("FrameInUseCount");
12382  json.WriteNumber(m_FrameInUseCount);
12383  }
12384 
12385  if(m_Algorithm != 0)
12386  {
12387  json.WriteString("Algorithm");
12388  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12389  }
12390  }
12391  else
12392  {
12393  json.WriteString("PreferredBlockSize");
12394  json.WriteNumber(m_PreferredBlockSize);
12395  }
12396 
12397  json.WriteString("Blocks");
12398  json.BeginObject();
12399  for(size_t i = 0; i < m_Blocks.size(); ++i)
12400  {
12401  json.BeginString();
12402  json.ContinueString(m_Blocks[i]->GetId());
12403  json.EndString();
12404 
12405  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12406  }
12407  json.EndObject();
12408 
12409  json.EndObject();
12410 }
12411 
12412 #endif // #if VMA_STATS_STRING_ENABLED
12413 
12414 void VmaBlockVector::Defragment(
12415  class VmaBlockVectorDefragmentationContext* pCtx,
12416  VmaDefragmentationStats* pStats,
12417  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12418  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12419  VkCommandBuffer commandBuffer)
12420 {
12421  pCtx->res = VK_SUCCESS;
12422 
12423  const VkMemoryPropertyFlags memPropFlags =
12424  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12425  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12426  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12427 
12428  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12429  isHostVisible;
12430  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12431  !IsCorruptionDetectionEnabled();
12432 
12433  // There are options to defragment this memory type.
12434  if(canDefragmentOnCpu || canDefragmentOnGpu)
12435  {
12436  bool defragmentOnGpu;
12437  // There is only one option to defragment this memory type.
12438  if(canDefragmentOnGpu != canDefragmentOnCpu)
12439  {
12440  defragmentOnGpu = canDefragmentOnGpu;
12441  }
12442  // Both options are available: use heuristics to choose the better one.
12443  else
12444  {
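  // Heuristic: prefer the GPU path for DEVICE_LOCAL memory, where access
  // through a host mapping is typically slow, and on integrated GPUs, where
  // all memory is device-local anyway.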
12445  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12446  m_hAllocator->IsIntegratedGpu();
12447  }
12448 
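  // Overlapping moves are possible only on the CPU path, which uses memmove();
  // vkCmdCopyBuffer on the GPU path must not be given overlapping source and
  // destination regions.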
12449  bool overlappingMoveSupported = !defragmentOnGpu;
12450 
12451  if(m_hAllocator->m_UseMutex)
12452  {
12453  m_Mutex.LockWrite();
12454  pCtx->mutexLocked = true;
12455  }
12456 
12457  pCtx->Begin(overlappingMoveSupported);
12458 
12459  // Defragment.
12460 
12461  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12462  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12463  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12464  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12465  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12466 
12467  // Accumulate statistics.
12468  if(pStats != VMA_NULL)
12469  {
12470  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12471  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12472  pStats->bytesMoved += bytesMoved;
12473  pStats->allocationsMoved += allocationsMoved;
12474  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12475  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12476  if(defragmentOnGpu)
12477  {
12478  maxGpuBytesToMove -= bytesMoved;
12479  maxGpuAllocationsToMove -= allocationsMoved;
12480  }
12481  else
12482  {
12483  maxCpuBytesToMove -= bytesMoved;
12484  maxCpuAllocationsToMove -= allocationsMoved;
12485  }
12486  }
12487 
12488  if(pCtx->res >= VK_SUCCESS)
12489  {
12490  if(defragmentOnGpu)
12491  {
12492  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12493  }
12494  else
12495  {
12496  ApplyDefragmentationMovesCpu(pCtx, moves);
12497  }
12498  }
12499  }
12500 }
12501 
12502 void VmaBlockVector::DefragmentationEnd(
12503  class VmaBlockVectorDefragmentationContext* pCtx,
12504  VmaDefragmentationStats* pStats)
12505 {
12506  // Destroy buffers.
12507  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12508  {
12509  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12510  if(blockCtx.hBuffer)
12511  {
12512  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12513  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12514  }
12515  }
12516 
12517  if(pCtx->res >= VK_SUCCESS)
12518  {
12519  FreeEmptyBlocks(pStats);
12520  }
12521 
12522  if(pCtx->mutexLocked)
12523  {
12524  VMA_ASSERT(m_hAllocator->m_UseMutex);
12525  m_Mutex.UnlockWrite();
12526  }
12527 }
12528 
12529 size_t VmaBlockVector::CalcAllocationCount() const
12530 {
12531  size_t result = 0;
12532  for(size_t i = 0; i < m_Blocks.size(); ++i)
12533  {
12534  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12535  }
12536  return result;
12537 }
12538 
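// Conservatively checks whether any block interleaves buffer (linear) and
// image (optimal tiling) suballocations within bufferImageGranularity; if so,
// the fast defragmentation algorithm is not safe to use (see
// VmaBlockVectorDefragmentationContext::Begin).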
12539 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12540 {
12541  if(m_BufferImageGranularity == 1)
12542  {
12543  return false;
12544  }
12545  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12546  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12547  {
12548  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12549  VMA_ASSERT(m_Algorithm == 0);
12550  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12551  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12552  {
12553  return true;
12554  }
12555  }
12556  return false;
12557 }
12558 
12559 void VmaBlockVector::MakePoolAllocationsLost(
12560  uint32_t currentFrameIndex,
12561  size_t* pLostAllocationCount)
12562 {
12563  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12564  size_t lostAllocationCount = 0;
12565  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12566  {
12567  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12568  VMA_ASSERT(pBlock);
12569  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12570  }
12571  if(pLostAllocationCount != VMA_NULL)
12572  {
12573  *pLostAllocationCount = lostAllocationCount;
12574  }
12575 }
12576 
12577 VkResult VmaBlockVector::CheckCorruption()
12578 {
12579  if(!IsCorruptionDetectionEnabled())
12580  {
12581  return VK_ERROR_FEATURE_NOT_PRESENT;
12582  }
12583 
12584  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12585  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12586  {
12587  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12588  VMA_ASSERT(pBlock);
12589  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12590  if(res != VK_SUCCESS)
12591  {
12592  return res;
12593  }
12594  }
12595  return VK_SUCCESS;
12596 }
12597 
12598 void VmaBlockVector::AddStats(VmaStats* pStats)
12599 {
12600  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12601  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12602 
12603  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12604 
12605  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12606  {
12607  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12608  VMA_ASSERT(pBlock);
12609  VMA_HEAVY_ASSERT(pBlock->Validate());
12610  VmaStatInfo allocationStatInfo;
12611  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12612  VmaAddStatInfo(pStats->total, allocationStatInfo);
12613  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12614  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12615  }
12616 }
12617 
12618 ////////////////////////////////////////////////////////////////////////////////
12619 // VmaDefragmentationAlgorithm_Generic members definition
12620 
12621 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12622  VmaAllocator hAllocator,
12623  VmaBlockVector* pBlockVector,
12624  uint32_t currentFrameIndex,
12625  bool overlappingMoveSupported) :
12626  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12627  m_AllocationCount(0),
12628  m_AllAllocations(false),
12629  m_BytesMoved(0),
12630  m_AllocationsMoved(0),
12631  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12632 {
12633  // Create block info for each block.
12634  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12635  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12636  {
12637  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12638  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12639  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12640  m_Blocks.push_back(pBlockInfo);
12641  }
12642 
12643  // Sort them by m_pBlock pointer value.
12644  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12645 }
12646 
12647 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12648 {
12649  for(size_t i = m_Blocks.size(); i--; )
12650  {
12651  vma_delete(m_hAllocator, m_Blocks[i]);
12652  }
12653 }
12654 
12655 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12656 {
12657  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
12658  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12659  {
12660  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12661  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12662  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12663  {
12664  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12665  (*it)->m_Allocations.push_back(allocInfo);
12666  }
12667  else
12668  {
12669  VMA_ASSERT(0);
12670  }
12671 
12672  ++m_AllocationCount;
12673  }
12674 }
12675 
12676 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12677  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12678  VkDeviceSize maxBytesToMove,
12679  uint32_t maxAllocationsToMove)
12680 {
12681  if(m_Blocks.empty())
12682  {
12683  return VK_SUCCESS;
12684  }
12685 
12686  // This is a choice based on research.
12687  // Option 1:
12688  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12689  // Option 2:
12690  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12691  // Option 3:
12692  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12693 
12694  size_t srcBlockMinIndex = 0;
12695  // With FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
12696  /*
12697  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12698  {
12699  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12700  if(blocksWithNonMovableCount > 0)
12701  {
12702  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12703  }
12704  }
12705  */
12706 
12707  size_t srcBlockIndex = m_Blocks.size() - 1;
12708  size_t srcAllocIndex = SIZE_MAX;
12709  for(;;)
12710  {
12711  // 1. Find next allocation to move.
12712  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12713  // 1.2. Then start from last to first m_Allocations.
12714  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12715  {
12716  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12717  {
12718  // Finished: no more allocations to process.
12719  if(srcBlockIndex == srcBlockMinIndex)
12720  {
12721  return VK_SUCCESS;
12722  }
12723  else
12724  {
12725  --srcBlockIndex;
12726  srcAllocIndex = SIZE_MAX;
12727  }
12728  }
12729  else
12730  {
12731  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12732  }
12733  }
12734 
12735  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12736  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12737 
12738  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12739  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12740  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12741  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12742 
12743  // 2. Try to find new place for this allocation in preceding or current block.
12744  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12745  {
12746  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12747  VmaAllocationRequest dstAllocRequest;
12748  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12749  m_CurrentFrameIndex,
12750  m_pBlockVector->GetFrameInUseCount(),
12751  m_pBlockVector->GetBufferImageGranularity(),
12752  size,
12753  alignment,
12754  false, // upperAddress
12755  suballocType,
12756  false, // canMakeOtherLost
12757  strategy,
12758  &dstAllocRequest) &&
12759  MoveMakesSense(
12760  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12761  {
12762  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12763 
12764  // Reached limit on number of allocations or bytes to move.
12765  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12766  (m_BytesMoved + size > maxBytesToMove))
12767  {
12768  return VK_SUCCESS;
12769  }
12770 
12771  VmaDefragmentationMove move;
12772  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12773  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12774  move.srcOffset = srcOffset;
12775  move.dstOffset = dstAllocRequest.offset;
12776  move.size = size;
12777  moves.push_back(move);
12778 
12779  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12780  dstAllocRequest,
12781  suballocType,
12782  size,
12783  allocInfo.m_hAllocation);
12784  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12785 
12786  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12787 
12788  if(allocInfo.m_pChanged != VMA_NULL)
12789  {
12790  *allocInfo.m_pChanged = VK_TRUE;
12791  }
12792 
12793  ++m_AllocationsMoved;
12794  m_BytesMoved += size;
12795 
12796  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12797 
12798  break;
12799  }
12800  }
12801 
12802  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for the next round.
12803 
12804  if(srcAllocIndex > 0)
12805  {
12806  --srcAllocIndex;
12807  }
12808  else
12809  {
12810  if(srcBlockIndex > 0)
12811  {
12812  --srcBlockIndex;
12813  srcAllocIndex = SIZE_MAX;
12814  }
12815  else
12816  {
12817  return VK_SUCCESS;
12818  }
12819  }
12820  }
12821 }
12822 
12823 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12824 {
12825  size_t result = 0;
12826  for(size_t i = 0; i < m_Blocks.size(); ++i)
12827  {
12828  if(m_Blocks[i]->m_HasNonMovableAllocations)
12829  {
12830  ++result;
12831  }
12832  }
12833  return result;
12834 }
12835 
12836 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12837  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12838  VkDeviceSize maxBytesToMove,
12839  uint32_t maxAllocationsToMove)
12840 {
12841  if(!m_AllAllocations && m_AllocationCount == 0)
12842  {
12843  return VK_SUCCESS;
12844  }
12845 
12846  const size_t blockCount = m_Blocks.size();
12847  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12848  {
12849  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12850 
12851  if(m_AllAllocations)
12852  {
12853  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12854  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12855  it != pMetadata->m_Suballocations.end();
12856  ++it)
12857  {
12858  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12859  {
12860  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12861  pBlockInfo->m_Allocations.push_back(allocInfo);
12862  }
12863  }
12864  }
12865 
12866  pBlockInfo->CalcHasNonMovableAllocations();
12867 
12868  // This is a choice based on research.
12869  // Option 1:
12870  pBlockInfo->SortAllocationsByOffsetDescending();
12871  // Option 2:
12872  //pBlockInfo->SortAllocationsBySizeDescending();
12873  }
12874 
12875  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12876  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12877 
12878  // This is a choice based on research.
12879  const uint32_t roundCount = 2;
12880 
12881  // Execute defragmentation rounds (the main part).
12882  VkResult result = VK_SUCCESS;
12883  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12884  {
12885  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12886  }
12887 
12888  return result;
12889 }
12890 
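// A move makes sense only if it takes the allocation to a strictly smaller
// (block index, offset) pair, compared lexicographically - i.e. closer to the
// front of the pool - so every accepted move compacts the pool.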
12891 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12892  size_t dstBlockIndex, VkDeviceSize dstOffset,
12893  size_t srcBlockIndex, VkDeviceSize srcOffset)
12894 {
12895  if(dstBlockIndex < srcBlockIndex)
12896  {
12897  return true;
12898  }
12899  if(dstBlockIndex > srcBlockIndex)
12900  {
12901  return false;
12902  }
12903  if(dstOffset < srcOffset)
12904  {
12905  return true;
12906  }
12907  return false;
12908 }
12909 
12910 ////////////////////////////////////////////////////////////////////////////////
12911 // VmaDefragmentationAlgorithm_Fast
12912 
12913 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12914  VmaAllocator hAllocator,
12915  VmaBlockVector* pBlockVector,
12916  uint32_t currentFrameIndex,
12917  bool overlappingMoveSupported) :
12918  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12919  m_OverlappingMoveSupported(overlappingMoveSupported),
12920  m_AllocationCount(0),
12921  m_AllAllocations(false),
12922  m_BytesMoved(0),
12923  m_AllocationsMoved(0),
12924  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12925 {
12926  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12927 
12928 }
12929 
12930 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12931 {
12932 }
12933 
12934 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12935  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12936  VkDeviceSize maxBytesToMove,
12937  uint32_t maxAllocationsToMove)
12938 {
12939  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12940 
12941  const size_t blockCount = m_pBlockVector->GetBlockCount();
12942  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12943  {
12944  return VK_SUCCESS;
12945  }
12946 
12947  PreprocessMetadata();
12948 
12949  // Sort blocks in order from most "destination" to most "source".
12950 
12951  m_BlockInfos.resize(blockCount);
12952  for(size_t i = 0; i < blockCount; ++i)
12953  {
12954  m_BlockInfos[i].origBlockIndex = i;
12955  }
12956 
12957  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12958  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12959  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12960  });
12961 
12962  // THE MAIN ALGORITHM
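  // Single pass over all suballocations, visiting blocks from most "destination"
  // (least free space) to most "source": each allocation is packed at the lowest
  // offset that fits, either in the current destination block or in a free gap
  // previously registered in freeSpaceDb.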
12963 
12964  FreeSpaceDatabase freeSpaceDb;
12965 
12966  size_t dstBlockInfoIndex = 0;
12967  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12968  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12969  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12970  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12971  VkDeviceSize dstOffset = 0;
12972 
12973  bool end = false;
12974  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12975  {
12976  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12977  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12978  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12979  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12980  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12981  {
12982  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12983  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12984  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12985  if(m_AllocationsMoved == maxAllocationsToMove ||
12986  m_BytesMoved + srcAllocSize > maxBytesToMove)
12987  {
12988  end = true;
12989  break;
12990  }
12991  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12992 
12993  // Try to place it in one of the free spaces from the database.
12994  size_t freeSpaceInfoIndex;
12995  VkDeviceSize dstAllocOffset;
12996  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12997  freeSpaceInfoIndex, dstAllocOffset))
12998  {
12999  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13000  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13001  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13002 
13003  // Same block
13004  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13005  {
13006  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13007 
13008  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13009 
13010  VmaSuballocation suballoc = *srcSuballocIt;
13011  suballoc.offset = dstAllocOffset;
13012  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13013  m_BytesMoved += srcAllocSize;
13014  ++m_AllocationsMoved;
13015 
13016  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13017  ++nextSuballocIt;
13018  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13019  srcSuballocIt = nextSuballocIt;
13020 
13021  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13022 
13023  VmaDefragmentationMove move = {
13024  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13025  srcAllocOffset, dstAllocOffset,
13026  srcAllocSize };
13027  moves.push_back(move);
13028  }
13029  // Different block
13030  else
13031  {
13032  // MOVE OPTION 2: Move the allocation to a different block.
13033 
13034  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13035 
13036  VmaSuballocation suballoc = *srcSuballocIt;
13037  suballoc.offset = dstAllocOffset;
13038  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13039  m_BytesMoved += srcAllocSize;
13040  ++m_AllocationsMoved;
13041 
13042  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13043  ++nextSuballocIt;
13044  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13045  srcSuballocIt = nextSuballocIt;
13046 
13047  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13048 
13049  VmaDefragmentationMove move = {
13050  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13051  srcAllocOffset, dstAllocOffset,
13052  srcAllocSize };
13053  moves.push_back(move);
13054  }
13055  }
13056  else
13057  {
13058  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13059 
13060  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
13061  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13062  dstAllocOffset + srcAllocSize > dstBlockSize)
13063  {
13064  // But before that, register remaining free space at the end of dst block.
13065  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13066 
13067  ++dstBlockInfoIndex;
13068  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13069  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13070  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13071  dstBlockSize = pDstMetadata->GetSize();
13072  dstOffset = 0;
13073  dstAllocOffset = 0;
13074  }
13075 
13076  // Same block
13077  if(dstBlockInfoIndex == srcBlockInfoIndex)
13078  {
13079  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13080 
13081  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13082 
13083  bool skipOver = overlap;
13084  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13085  {
13086  // If the destination and source regions overlap, skip the move if it would
13087  // shift the allocation by less than 1/64 of its size.
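  // E.g. a 64 MiB allocation is moved only if it would shift down by at least 1 MiB.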
13088  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13089  }
13090 
13091  if(skipOver)
13092  {
13093  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13094 
13095  dstOffset = srcAllocOffset + srcAllocSize;
13096  ++srcSuballocIt;
13097  }
13098  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13099  else
13100  {
13101  srcSuballocIt->offset = dstAllocOffset;
13102  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13103  dstOffset = dstAllocOffset + srcAllocSize;
13104  m_BytesMoved += srcAllocSize;
13105  ++m_AllocationsMoved;
13106  ++srcSuballocIt;
13107  VmaDefragmentationMove move = {
13108  srcOrigBlockIndex, dstOrigBlockIndex,
13109  srcAllocOffset, dstAllocOffset,
13110  srcAllocSize };
13111  moves.push_back(move);
13112  }
13113  }
13114  // Different block
13115  else
13116  {
13117  // MOVE OPTION 2: Move the allocation to a different block.
13118 
13119  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13120  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13121 
13122  VmaSuballocation suballoc = *srcSuballocIt;
13123  suballoc.offset = dstAllocOffset;
13124  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13125  dstOffset = dstAllocOffset + srcAllocSize;
13126  m_BytesMoved += srcAllocSize;
13127  ++m_AllocationsMoved;
13128 
13129  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13130  ++nextSuballocIt;
13131  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13132  srcSuballocIt = nextSuballocIt;
13133 
13134  pDstMetadata->m_Suballocations.push_back(suballoc);
13135 
13136  VmaDefragmentationMove move = {
13137  srcOrigBlockIndex, dstOrigBlockIndex,
13138  srcAllocOffset, dstAllocOffset,
13139  srcAllocSize };
13140  moves.push_back(move);
13141  }
13142  }
13143  }
13144  }
13145 
13146  m_BlockInfos.clear();
13147 
13148  PostprocessMetadata();
13149 
13150  return VK_SUCCESS;
13151 }
13152 
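// Temporarily removes all FREE suballocations from each block's metadata so
// that the algorithm above can treat m_Suballocations as a dense list of real
// allocations; PostprocessMetadata() reconstructs the free list afterwards.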
13153 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13154 {
13155  const size_t blockCount = m_pBlockVector->GetBlockCount();
13156  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13157  {
13158  VmaBlockMetadata_Generic* const pMetadata =
13159  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13160  pMetadata->m_FreeCount = 0;
13161  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13162  pMetadata->m_FreeSuballocationsBySize.clear();
13163  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13164  it != pMetadata->m_Suballocations.end(); )
13165  {
13166  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13167  {
13168  VmaSuballocationList::iterator nextIt = it;
13169  ++nextIt;
13170  pMetadata->m_Suballocations.erase(it);
13171  it = nextIt;
13172  }
13173  else
13174  {
13175  ++it;
13176  }
13177  }
13178  }
13179 }
13180 
13181 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13182 {
13183  const size_t blockCount = m_pBlockVector->GetBlockCount();
13184  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13185  {
13186  VmaBlockMetadata_Generic* const pMetadata =
13187  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13188  const VkDeviceSize blockSize = pMetadata->GetSize();
13189 
13190  // No allocations in this block - entire area is free.
13191  if(pMetadata->m_Suballocations.empty())
13192  {
13193  pMetadata->m_FreeCount = 1;
13194  //pMetadata->m_SumFreeSize is already set to blockSize.
13195  VmaSuballocation suballoc = {
13196  0, // offset
13197  blockSize, // size
13198  VMA_NULL, // hAllocation
13199  VMA_SUBALLOCATION_TYPE_FREE };
13200  pMetadata->m_Suballocations.push_back(suballoc);
13201  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13202  }
13203  // There are some allocations in this block.
13204  else
13205  {
13206  VkDeviceSize offset = 0;
13207  VmaSuballocationList::iterator it;
13208  for(it = pMetadata->m_Suballocations.begin();
13209  it != pMetadata->m_Suballocations.end();
13210  ++it)
13211  {
13212  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13213  VMA_ASSERT(it->offset >= offset);
13214 
13215  // Need to insert preceding free space.
13216  if(it->offset > offset)
13217  {
13218  ++pMetadata->m_FreeCount;
13219  const VkDeviceSize freeSize = it->offset - offset;
13220  VmaSuballocation suballoc = {
13221  offset, // offset
13222  freeSize, // size
13223  VMA_NULL, // hAllocation
13224  VMA_SUBALLOCATION_TYPE_FREE };
13225  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13226  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13227  {
13228  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13229  }
13230  }
13231 
13232  pMetadata->m_SumFreeSize -= it->size;
13233  offset = it->offset + it->size;
13234  }
13235 
13236  // Need to insert trailing free space.
13237  if(offset < blockSize)
13238  {
13239  ++pMetadata->m_FreeCount;
13240  const VkDeviceSize freeSize = blockSize - offset;
13241  VmaSuballocation suballoc = {
13242  offset, // offset
13243  freeSize, // size
13244  VMA_NULL, // hAllocation
13245  VMA_SUBALLOCATION_TYPE_FREE };
13246  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13247  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13248  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13249  {
13250  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13251  }
13252  }
13253 
13254  VMA_SORT(
13255  pMetadata->m_FreeSuballocationsBySize.begin(),
13256  pMetadata->m_FreeSuballocationsBySize.end(),
13257  VmaSuballocationItemSizeLess());
13258  }
13259 
13260  VMA_HEAVY_ASSERT(pMetadata->Validate());
13261  }
13262 }
13263 
13264 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13265 {
13266  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13267  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13268  while(it != pMetadata->m_Suballocations.end() &&
13269  it->offset < suballoc.offset)
13270  {
13271  ++it;
13272  }
13275  pMetadata->m_Suballocations.insert(it, suballoc);
13276 }
13277 
13278 ////////////////////////////////////////////////////////////////////////////////
13279 // VmaBlockVectorDefragmentationContext
13280 
13281 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13282  VmaAllocator hAllocator,
13283  VmaPool hCustomPool,
13284  VmaBlockVector* pBlockVector,
13285  uint32_t currFrameIndex,
13286  uint32_t algorithmFlags) :
13287  res(VK_SUCCESS),
13288  mutexLocked(false),
13289  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13290  m_hAllocator(hAllocator),
13291  m_hCustomPool(hCustomPool),
13292  m_pBlockVector(pBlockVector),
13293  m_CurrFrameIndex(currFrameIndex),
13294  m_AlgorithmFlags(algorithmFlags),
13295  m_pAlgorithm(VMA_NULL),
13296  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13297  m_AllAllocations(false)
13298 {
13299 }
13300 
13301 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13302 {
13303  vma_delete(m_hAllocator, m_pAlgorithm);
13304 }
13305 
13306 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13307 {
13308  AllocInfo info = { hAlloc, pChanged };
13309  m_Allocations.push_back(info);
13310 }
13311 
13312 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13313 {
13314  const bool allAllocations = m_AllAllocations ||
13315  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13316 
13317  /********************************
13318  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13319  ********************************/
13320 
13321  /*
13322  Fast algorithm is supported only when certain criteria are met:
13323  - VMA_DEBUG_MARGIN is 0.
13324  - All allocations in this block vector are moveable.
13325  - There is no possibility of image/buffer granularity conflict.
13326  */
13327  if(VMA_DEBUG_MARGIN == 0 &&
13328  allAllocations &&
13329  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13330  {
13331  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13332  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13333  }
13334  else
13335  {
13336  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13337  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13338  }
13339 
13340  if(allAllocations)
13341  {
13342  m_pAlgorithm->AddAll();
13343  }
13344  else
13345  {
13346  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13347  {
13348  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13349  }
13350  }
13351 }
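/*
Example of how the selection above plays out (a sketch under assumed conditions): a
default-algorithm pool built with VMA_DEBUG_MARGIN == 0, defragmented through pPools
(so AddAll() makes every allocation movable) and holding, for instance, only buffers
(no image/buffer granularity conflict possible) takes the
VmaDefragmentationAlgorithm_Fast path; any other combination falls back to
VmaDefragmentationAlgorithm_Generic.
*/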
13352 
13353 ////////////////////////////////////////////////////////////////////////////////
13354 // VmaDefragmentationContext
13355 
13356 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13357  VmaAllocator hAllocator,
13358  uint32_t currFrameIndex,
13359  uint32_t flags,
13360  VmaDefragmentationStats* pStats) :
13361  m_hAllocator(hAllocator),
13362  m_CurrFrameIndex(currFrameIndex),
13363  m_Flags(flags),
13364  m_pStats(pStats),
13365  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13366 {
13367  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13368 }
13369 
13370 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13371 {
13372  for(size_t i = m_CustomPoolContexts.size(); i--; )
13373  {
13374  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13375  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13376  vma_delete(m_hAllocator, pBlockVectorCtx);
13377  }
13378  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13379  {
13380  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13381  if(pBlockVectorCtx)
13382  {
13383  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13384  vma_delete(m_hAllocator, pBlockVectorCtx);
13385  }
13386  }
13387 }
13388 
13389 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13390 {
13391  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13392  {
13393  VmaPool pool = pPools[poolIndex];
13394  VMA_ASSERT(pool);
13395  // Pools with algorithm other than default are not defragmented.
13396  if(pool->m_BlockVector.GetAlgorithm() == 0)
13397  {
13398  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13399 
13400  for(size_t i = m_CustomPoolContexts.size(); i--; )
13401  {
13402  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13403  {
13404  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13405  break;
13406  }
13407  }
13408 
13409  if(!pBlockVectorDefragCtx)
13410  {
13411  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13412  m_hAllocator,
13413  pool,
13414  &pool->m_BlockVector,
13415  m_CurrFrameIndex,
13416  m_Flags);
13417  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13418  }
13419 
13420  pBlockVectorDefragCtx->AddAll();
13421  }
13422  }
13423 }
13424 
13425 void VmaDefragmentationContext_T::AddAllocations(
13426  uint32_t allocationCount,
13427  VmaAllocation* pAllocations,
13428  VkBool32* pAllocationsChanged)
13429 {
13430  // Dispatch pAllocations among defragmentators. Create them when necessary.
13431  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13432  {
13433  const VmaAllocation hAlloc = pAllocations[allocIndex];
13434  VMA_ASSERT(hAlloc);
13435  // DedicatedAlloc cannot be defragmented.
13436  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13437  // Lost allocation cannot be defragmented.
13438  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13439  {
13440  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13441 
13442  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13443  // This allocation belongs to custom pool.
13444  if(hAllocPool != VK_NULL_HANDLE)
13445  {
13446  // Pools with algorithm other than default are not defragmented.
13447  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13448  {
13449  for(size_t i = m_CustomPoolContexts.size(); i--; )
13450  {
13451  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13452  {
13453  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13454  break;
13455  }
13456  }
13457  if(!pBlockVectorDefragCtx)
13458  {
13459  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13460  m_hAllocator,
13461  hAllocPool,
13462  &hAllocPool->m_BlockVector,
13463  m_CurrFrameIndex,
13464  m_Flags);
13465  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13466  }
13467  }
13468  }
13469  // This allocation belongs to default pool.
13470  else
13471  {
13472  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13473  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13474  if(!pBlockVectorDefragCtx)
13475  {
13476  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13477  m_hAllocator,
13478  VMA_NULL, // hCustomPool
13479  m_hAllocator->m_pBlockVectors[memTypeIndex],
13480  m_CurrFrameIndex,
13481  m_Flags);
13482  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13483  }
13484  }
13485 
13486  if(pBlockVectorDefragCtx)
13487  {
13488  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13489  &pAllocationsChanged[allocIndex] : VMA_NULL;
13490  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13491  }
13492  }
13493  }
13494 }
13495 
13496 VkResult VmaDefragmentationContext_T::Defragment(
13497  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13498  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13499  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13500 {
13501  if(pStats)
13502  {
13503  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13504  }
13505 
13506  if(commandBuffer == VK_NULL_HANDLE)
13507  {
13508  maxGpuBytesToMove = 0;
13509  maxGpuAllocationsToMove = 0;
13510  }
13511 
13512  VkResult res = VK_SUCCESS;
13513 
13514  // Process default pools.
13515  for(uint32_t memTypeIndex = 0;
13516  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13517  ++memTypeIndex)
13518  {
13519  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13520  if(pBlockVectorCtx)
13521  {
13522  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13523  pBlockVectorCtx->GetBlockVector()->Defragment(
13524  pBlockVectorCtx,
13525  pStats,
13526  maxCpuBytesToMove, maxCpuAllocationsToMove,
13527  maxGpuBytesToMove, maxGpuAllocationsToMove,
13528  commandBuffer);
13529  if(pBlockVectorCtx->res != VK_SUCCESS)
13530  {
13531  res = pBlockVectorCtx->res;
13532  }
13533  }
13534  }
13535 
13536  // Process custom pools.
13537  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13538  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13539  ++customCtxIndex)
13540  {
13541  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13542  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13543  pBlockVectorCtx->GetBlockVector()->Defragment(
13544  pBlockVectorCtx,
13545  pStats,
13546  maxCpuBytesToMove, maxCpuAllocationsToMove,
13547  maxGpuBytesToMove, maxGpuAllocationsToMove,
13548  commandBuffer);
13549  if(pBlockVectorCtx->res != VK_SUCCESS)
13550  {
13551  res = pBlockVectorCtx->res;
13552  }
13553  }
13554 
13555  return res;
13556 }
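/*
Caller-side sketch of driving the Defragment() above through the public API (CPU-only
moves; error handling omitted, variable names hypothetical):

    std::vector<VmaAllocation> allocations; // filled by the application, buffers only
    std::vector<VkBool32> allocationsChanged(allocations.size());

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocations.size();
    defragInfo.pAllocations = allocations.data();
    defragInfo.pAllocationsChanged = allocationsChanged.data();
    defragInfo.maxCpuBytesToMove = 64ull * 1024 * 1024;
    defragInfo.maxCpuAllocationsToMove = 128;
    defragInfo.commandBuffer = VK_NULL_HANDLE; // CPU-only: GPU limits are zeroed above

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
*/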
13557 
13558 ////////////////////////////////////////////////////////////////////////////////
13559 // VmaRecorder
13560 
13561 #if VMA_RECORDING_ENABLED
13562 
13563 VmaRecorder::VmaRecorder() :
13564  m_UseMutex(true),
13565  m_Flags(0),
13566  m_File(VMA_NULL),
13567  m_Freq(INT64_MAX),
13568  m_StartCounter(INT64_MAX)
13569 {
13570 }
13571 
13572 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13573 {
13574  m_UseMutex = useMutex;
13575  m_Flags = settings.flags;
13576 
13577  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13578  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13579 
13580  // Open file for writing.
13581  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13582  if(err != 0)
13583  {
13584  return VK_ERROR_INITIALIZATION_FAILED;
13585  }
13586 
13587  // Write header.
13588  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13589  fprintf(m_File, "%s\n", "1,5");
13590 
13591  return VK_SUCCESS;
13592 }
13593 
13594 VmaRecorder::~VmaRecorder()
13595 {
13596  if(m_File != VMA_NULL)
13597  {
13598  fclose(m_File);
13599  }
13600 }
13601 
13602 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13603 {
13604  CallParams callParams;
13605  GetBasicParams(callParams);
13606 
13607  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13608  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13609  Flush();
13610 }
13611 
13612 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13613 {
13614  CallParams callParams;
13615  GetBasicParams(callParams);
13616 
13617  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13618  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13619  Flush();
13620 }
13621 
13622 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13623 {
13624  CallParams callParams;
13625  GetBasicParams(callParams);
13626 
13627  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13628  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13629  createInfo.memoryTypeIndex,
13630  createInfo.flags,
13631  createInfo.blockSize,
13632  (uint64_t)createInfo.minBlockCount,
13633  (uint64_t)createInfo.maxBlockCount,
13634  createInfo.frameInUseCount,
13635  pool);
13636  Flush();
13637 }
13638 
13639 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13640 {
13641  CallParams callParams;
13642  GetBasicParams(callParams);
13643 
13644  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13645  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13646  pool);
13647  Flush();
13648 }
13649 
13650 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13651  const VkMemoryRequirements& vkMemReq,
13652  const VmaAllocationCreateInfo& createInfo,
13653  VmaAllocation allocation)
13654 {
13655  CallParams callParams;
13656  GetBasicParams(callParams);
13657 
13658  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13659  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13660  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13661  vkMemReq.size,
13662  vkMemReq.alignment,
13663  vkMemReq.memoryTypeBits,
13664  createInfo.flags,
13665  createInfo.usage,
13666  createInfo.requiredFlags,
13667  createInfo.preferredFlags,
13668  createInfo.memoryTypeBits,
13669  createInfo.pool,
13670  allocation,
13671  userDataStr.GetString());
13672  Flush();
13673 }
13674 
13675 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13676  const VkMemoryRequirements& vkMemReq,
13677  const VmaAllocationCreateInfo& createInfo,
13678  uint64_t allocationCount,
13679  const VmaAllocation* pAllocations)
13680 {
13681  CallParams callParams;
13682  GetBasicParams(callParams);
13683 
13684  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13685  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13686  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13687  vkMemReq.size,
13688  vkMemReq.alignment,
13689  vkMemReq.memoryTypeBits,
13690  createInfo.flags,
13691  createInfo.usage,
13692  createInfo.requiredFlags,
13693  createInfo.preferredFlags,
13694  createInfo.memoryTypeBits,
13695  createInfo.pool);
13696  PrintPointerList(allocationCount, pAllocations);
13697  fprintf(m_File, ",%s\n", userDataStr.GetString());
13698  Flush();
13699 }
13700 
13701 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13702  const VkMemoryRequirements& vkMemReq,
13703  bool requiresDedicatedAllocation,
13704  bool prefersDedicatedAllocation,
13705  const VmaAllocationCreateInfo& createInfo,
13706  VmaAllocation allocation)
13707 {
13708  CallParams callParams;
13709  GetBasicParams(callParams);
13710 
13711  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13712  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13713  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13714  vkMemReq.size,
13715  vkMemReq.alignment,
13716  vkMemReq.memoryTypeBits,
13717  requiresDedicatedAllocation ? 1 : 0,
13718  prefersDedicatedAllocation ? 1 : 0,
13719  createInfo.flags,
13720  createInfo.usage,
13721  createInfo.requiredFlags,
13722  createInfo.preferredFlags,
13723  createInfo.memoryTypeBits,
13724  createInfo.pool,
13725  allocation,
13726  userDataStr.GetString());
13727  Flush();
13728 }
13729 
13730 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13731  const VkMemoryRequirements& vkMemReq,
13732  bool requiresDedicatedAllocation,
13733  bool prefersDedicatedAllocation,
13734  const VmaAllocationCreateInfo& createInfo,
13735  VmaAllocation allocation)
13736 {
13737  CallParams callParams;
13738  GetBasicParams(callParams);
13739 
13740  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13741  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13742  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13743  vkMemReq.size,
13744  vkMemReq.alignment,
13745  vkMemReq.memoryTypeBits,
13746  requiresDedicatedAllocation ? 1 : 0,
13747  prefersDedicatedAllocation ? 1 : 0,
13748  createInfo.flags,
13749  createInfo.usage,
13750  createInfo.requiredFlags,
13751  createInfo.preferredFlags,
13752  createInfo.memoryTypeBits,
13753  createInfo.pool,
13754  allocation,
13755  userDataStr.GetString());
13756  Flush();
13757 }
13758 
13759 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13760  VmaAllocation allocation)
13761 {
13762  CallParams callParams;
13763  GetBasicParams(callParams);
13764 
13765  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13766  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13767  allocation);
13768  Flush();
13769 }
13770 
13771 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13772  uint64_t allocationCount,
13773  const VmaAllocation* pAllocations)
13774 {
13775  CallParams callParams;
13776  GetBasicParams(callParams);
13777 
13778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13779  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13780  PrintPointerList(allocationCount, pAllocations);
13781  fprintf(m_File, "\n");
13782  Flush();
13783 }
13784 
13785 void VmaRecorder::RecordResizeAllocation(
13786  uint32_t frameIndex,
13787  VmaAllocation allocation,
13788  VkDeviceSize newSize)
13789 {
13790  CallParams callParams;
13791  GetBasicParams(callParams);
13792 
13793  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13794  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13795  allocation, newSize);
13796  Flush();
13797 }
13798 
13799 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13800  VmaAllocation allocation,
13801  const void* pUserData)
13802 {
13803  CallParams callParams;
13804  GetBasicParams(callParams);
13805 
13806  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13807  UserDataString userDataStr(
13808  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13809  pUserData);
13810  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13811  allocation,
13812  userDataStr.GetString());
13813  Flush();
13814 }
13815 
13816 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13817  VmaAllocation allocation)
13818 {
13819  CallParams callParams;
13820  GetBasicParams(callParams);
13821 
13822  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13823  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13824  allocation);
13825  Flush();
13826 }
13827 
13828 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13829  VmaAllocation allocation)
13830 {
13831  CallParams callParams;
13832  GetBasicParams(callParams);
13833 
13834  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13835  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13836  allocation);
13837  Flush();
13838 }
13839 
13840 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13841  VmaAllocation allocation)
13842 {
13843  CallParams callParams;
13844  GetBasicParams(callParams);
13845 
13846  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13847  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13848  allocation);
13849  Flush();
13850 }
13851 
13852 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13853  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13854 {
13855  CallParams callParams;
13856  GetBasicParams(callParams);
13857 
13858  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13859  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13860  allocation,
13861  offset,
13862  size);
13863  Flush();
13864 }
13865 
13866 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13867  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13868 {
13869  CallParams callParams;
13870  GetBasicParams(callParams);
13871 
13872  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13873  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13874  allocation,
13875  offset,
13876  size);
13877  Flush();
13878 }
13879 
13880 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13881  const VkBufferCreateInfo& bufCreateInfo,
13882  const VmaAllocationCreateInfo& allocCreateInfo,
13883  VmaAllocation allocation)
13884 {
13885  CallParams callParams;
13886  GetBasicParams(callParams);
13887 
13888  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13889  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13890  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13891  bufCreateInfo.flags,
13892  bufCreateInfo.size,
13893  bufCreateInfo.usage,
13894  bufCreateInfo.sharingMode,
13895  allocCreateInfo.flags,
13896  allocCreateInfo.usage,
13897  allocCreateInfo.requiredFlags,
13898  allocCreateInfo.preferredFlags,
13899  allocCreateInfo.memoryTypeBits,
13900  allocCreateInfo.pool,
13901  allocation,
13902  userDataStr.GetString());
13903  Flush();
13904 }
13905 
13906 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13907  const VkImageCreateInfo& imageCreateInfo,
13908  const VmaAllocationCreateInfo& allocCreateInfo,
13909  VmaAllocation allocation)
13910 {
13911  CallParams callParams;
13912  GetBasicParams(callParams);
13913 
13914  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13915  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13916  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13917  imageCreateInfo.flags,
13918  imageCreateInfo.imageType,
13919  imageCreateInfo.format,
13920  imageCreateInfo.extent.width,
13921  imageCreateInfo.extent.height,
13922  imageCreateInfo.extent.depth,
13923  imageCreateInfo.mipLevels,
13924  imageCreateInfo.arrayLayers,
13925  imageCreateInfo.samples,
13926  imageCreateInfo.tiling,
13927  imageCreateInfo.usage,
13928  imageCreateInfo.sharingMode,
13929  imageCreateInfo.initialLayout,
13930  allocCreateInfo.flags,
13931  allocCreateInfo.usage,
13932  allocCreateInfo.requiredFlags,
13933  allocCreateInfo.preferredFlags,
13934  allocCreateInfo.memoryTypeBits,
13935  allocCreateInfo.pool,
13936  allocation,
13937  userDataStr.GetString());
13938  Flush();
13939 }
13940 
13941 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13942  VmaAllocation allocation)
13943 {
13944  CallParams callParams;
13945  GetBasicParams(callParams);
13946 
13947  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13948  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13949  allocation);
13950  Flush();
13951 }
13952 
13953 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13954  VmaAllocation allocation)
13955 {
13956  CallParams callParams;
13957  GetBasicParams(callParams);
13958 
13959  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13960  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13961  allocation);
13962  Flush();
13963 }
13964 
13965 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13966  VmaAllocation allocation)
13967 {
13968  CallParams callParams;
13969  GetBasicParams(callParams);
13970 
13971  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13972  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13973  allocation);
13974  Flush();
13975 }
13976 
13977 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13978  VmaAllocation allocation)
13979 {
13980  CallParams callParams;
13981  GetBasicParams(callParams);
13982 
13983  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13984  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13985  allocation);
13986  Flush();
13987 }
13988 
13989 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13990  VmaPool pool)
13991 {
13992  CallParams callParams;
13993  GetBasicParams(callParams);
13994 
13995  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13996  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13997  pool);
13998  Flush();
13999 }
14000 
14001 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14002  const VmaDefragmentationInfo2& info,
14003  VmaDefragmentationContext ctx)
14004 {
14005  CallParams callParams;
14006  GetBasicParams(callParams);
14007 
14008  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14009  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14010  info.flags);
14011  PrintPointerList(info.allocationCount, info.pAllocations);
14012  fprintf(m_File, ",");
14013  PrintPointerList(info.poolCount, info.pPools);
14014  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14015  info.maxCpuBytesToMove,
14016  info.maxCpuAllocationsToMove,
14017  info.maxGpuBytesToMove,
14018  info.maxGpuAllocationsToMove,
14019  info.commandBuffer,
14020  ctx);
14021  Flush();
14022 }
14023 
14024 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14025  VmaDefragmentationContext ctx)
14026 {
14027  CallParams callParams;
14028  GetBasicParams(callParams);
14029 
14030  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14031  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14032  ctx);
14033  Flush();
14034 }
14035 
14036 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14037 {
14038  if(pUserData != VMA_NULL)
14039  {
14040  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14041  {
14042  m_Str = (const char*)pUserData;
14043  }
14044  else
14045  {
14046  sprintf_s(m_PtrStr, "%p", pUserData);
14047  m_Str = m_PtrStr;
14048  }
14049  }
14050  else
14051  {
14052  m_Str = "";
14053  }
14054 }
14055 
14056 void VmaRecorder::WriteConfiguration(
14057  const VkPhysicalDeviceProperties& devProps,
14058  const VkPhysicalDeviceMemoryProperties& memProps,
14059  bool dedicatedAllocationExtensionEnabled)
14060 {
14061  fprintf(m_File, "Config,Begin\n");
14062 
14063  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14064  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14065  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14066  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14067  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14068  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14069 
14070  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14071  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14072  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14073 
14074  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14075  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14076  {
14077  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14078  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14079  }
14080  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14081  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14082  {
14083  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14084  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14085  }
14086 
14087  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14088 
14089  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14090  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14091  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14092  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14093  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14094  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14095  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14096  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14097  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14098 
14099  fprintf(m_File, "Config,End\n");
14100 }
14101 
14102 void VmaRecorder::GetBasicParams(CallParams& outParams)
14103 {
14104  outParams.threadId = GetCurrentThreadId();
14105 
14106  LARGE_INTEGER counter;
14107  QueryPerformanceCounter(&counter);
14108  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14109 }
14110 
14111 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14112 {
14113  if(count)
14114  {
14115  fprintf(m_File, "%p", pItems[0]);
14116  for(uint64_t i = 1; i < count; ++i)
14117  {
14118  fprintf(m_File, " %p", pItems[i]);
14119  }
14120  }
14121 }
14122 
14123 void VmaRecorder::Flush()
14124 {
14125  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14126  {
14127  fflush(m_File);
14128  }
14129 }
14130 
14131 #endif // #if VMA_RECORDING_ENABLED
14132 
14133 ////////////////////////////////////////////////////////////////////////////////
14134 // VmaAllocationObjectAllocator
14135 
14136 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14137  m_Allocator(pAllocationCallbacks, 1024)
14138 {
14139 }
14140 
14141 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14142 {
14143  VmaMutexLock mutexLock(m_Mutex);
14144  return m_Allocator.Alloc();
14145 }
14146 
14147 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14148 {
14149  VmaMutexLock mutexLock(m_Mutex);
14150  m_Allocator.Free(hAlloc);
14151 }
14152 
14153 ////////////////////////////////////////////////////////////////////////////////
14154 // VmaAllocator_T
14155 
14156 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14157  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14158  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14159  m_hDevice(pCreateInfo->device),
14160  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14161  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14162  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14163  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14164  m_PreferredLargeHeapBlockSize(0),
14165  m_PhysicalDevice(pCreateInfo->physicalDevice),
14166  m_CurrentFrameIndex(0),
14167  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14168  m_NextPoolId(0)
14169 #if VMA_RECORDING_ENABLED
14170  ,m_pRecorder(VMA_NULL)
14171 #endif
14172 {
14173  if(VMA_DEBUG_DETECT_CORRUPTION)
14174  {
14175  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14176  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14177  }
14178 
14179  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14180 
14181 #if !(VMA_DEDICATED_ALLOCATION)
14182  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14183  {
14184  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14185  }
14186 #endif
14187 
14188  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
14189  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14190  memset(&m_MemProps, 0, sizeof(m_MemProps));
14191 
14192  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14193  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14194 
14195  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14196  {
14197  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14198  }
14199 
14200  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14201  {
14202  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14203  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14204  }
14205 
14206  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14207 
14208  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14209  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14210 
14211  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14212  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14213  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14214  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14215 
14216  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14217  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14218 
14219  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14220  {
14221  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14222  {
14223  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14224  if(limit != VK_WHOLE_SIZE)
14225  {
14226  m_HeapSizeLimit[heapIndex] = limit;
14227  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14228  {
14229  m_MemProps.memoryHeaps[heapIndex].size = limit;
14230  }
14231  }
14232  }
14233  }
14234 
14235  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14236  {
14237  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14238 
14239  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14240  this,
14241  VK_NULL_HANDLE, // hParentPool
14242  memTypeIndex,
14243  preferredBlockSize,
14244  0,
14245  SIZE_MAX,
14246  GetBufferImageGranularity(),
14247  pCreateInfo->frameInUseCount,
14248  false, // isCustomPool
14249  false, // explicitBlockSize
14250  false); // linearAlgorithm
14251  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14252  // because minBlockCount is 0.
14253  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14254 
14255  }
14256 }
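/*
Sketch of the pHeapSizeLimit handling in the constructor above, from the application's
side (variable names hypothetical): a limited heap is clamped in m_MemProps, so the
allocator behaves as if the heap were smaller than it really is.

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE;        // VK_WHOLE_SIZE = no limit
    heapLimits[0] = 512ull * 1024 * 1024;     // cap heap 0 at 512 MiB

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapLimits;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/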
14257 
14258 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14259 {
14260  VkResult res = VK_SUCCESS;
14261 
14262  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14263  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14264  {
14265 #if VMA_RECORDING_ENABLED
14266  m_pRecorder = vma_new(this, VmaRecorder)();
14267  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14268  if(res != VK_SUCCESS)
14269  {
14270  return res;
14271  }
14272  m_pRecorder->WriteConfiguration(
14273  m_PhysicalDeviceProperties,
14274  m_MemProps,
14275  m_UseKhrDedicatedAllocation);
14276  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14277 #else
14278  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14279  return VK_ERROR_FEATURE_NOT_PRESENT;
14280 #endif
14281  }
14282 
14283  return res;
14284 }
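/*
Sketch of enabling the recording that Init() above consumes (requires compiling with
VMA_RECORDING_ENABLED defined to 1; the file path is illustrative):

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
    recordSettings.pFilePath = "vma_calls.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pRecordSettings = &recordSettings;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/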
14285 
14286 VmaAllocator_T::~VmaAllocator_T()
14287 {
14288 #if VMA_RECORDING_ENABLED
14289  if(m_pRecorder != VMA_NULL)
14290  {
14291  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14292  vma_delete(this, m_pRecorder);
14293  }
14294 #endif
14295 
14296  VMA_ASSERT(m_Pools.empty());
14297 
14298  for(size_t i = GetMemoryTypeCount(); i--; )
14299  {
14300  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14301  {
14302  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14303  }
14304 
14305  vma_delete(this, m_pDedicatedAllocations[i]);
14306  vma_delete(this, m_pBlockVectors[i]);
14307  }
14308 }
14309 
14310 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14311 {
14312 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14313  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14314  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14315  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14316  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14317  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14318  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14319  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14320  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14321  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14322  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14323  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14324  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14325  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14326  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14327  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14328  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14329  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14330 #if VMA_DEDICATED_ALLOCATION
14331  if(m_UseKhrDedicatedAllocation)
14332  {
14333  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14334  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14335  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14336  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14337  }
14338 #endif // #if VMA_DEDICATED_ALLOCATION
14339 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14340 
14341 #define VMA_COPY_IF_NOT_NULL(funcName) \
14342  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14343 
14344  if(pVulkanFunctions != VMA_NULL)
14345  {
14346  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14347  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14348  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14349  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14350  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14351  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14352  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14353  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14354  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14355  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14356  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14357  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14358  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14359  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14360  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14361  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14362  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14363 #if VMA_DEDICATED_ALLOCATION
14364  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14365  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14366 #endif
14367  }
14368 
14369 #undef VMA_COPY_IF_NOT_NULL
14370 
14371  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14372  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14373  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14374  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14375  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14376  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14377  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14378  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14379  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14380  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14381  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14382  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14383  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14384  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14385  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14386  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14387  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14388  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14389  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14390 #if VMA_DEDICATED_ALLOCATION
14391  if(m_UseKhrDedicatedAllocation)
14392  {
14393  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14394  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14395  }
14396 #endif
14397 }
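/*
Sketch of satisfying the asserts above when VMA_STATIC_VULKAN_FUNCTIONS is defined
to 0: the application fills VmaAllocatorCreateInfo::pVulkanFunctions itself (only a
few entries shown; every pointer checked above must be provided):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    // ... and so on for the remaining members ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/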
14398 
14399 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14400 {
14401  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14402  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14403  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14404  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14405 }
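/*
Worked example for CalcPreferredBlockSize() under the default macro values
(VMA_SMALL_HEAP_MAX_SIZE = 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB):
  - a 512 MiB heap counts as small: preferred block size = 512 MiB / 8 = 64 MiB;
  - an 8 GiB heap counts as large:  preferred block size = 256 MiB (or the
    user-provided preferredLargeHeapBlockSize).
*/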
14406 
14407 VkResult VmaAllocator_T::AllocateMemoryOfType(
14408  VkDeviceSize size,
14409  VkDeviceSize alignment,
14410  bool dedicatedAllocation,
14411  VkBuffer dedicatedBuffer,
14412  VkImage dedicatedImage,
14413  const VmaAllocationCreateInfo& createInfo,
14414  uint32_t memTypeIndex,
14415  VmaSuballocationType suballocType,
14416  size_t allocationCount,
14417  VmaAllocation* pAllocations)
14418 {
14419  VMA_ASSERT(pAllocations != VMA_NULL);
14420  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14421 
14422  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14423 
14424  // If memory type is not HOST_VISIBLE, disable MAPPED.
14425  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14426  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14427  {
14428  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14429  }
14430 
14431  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14432  VMA_ASSERT(blockVector);
14433 
14434  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14435  bool preferDedicatedMemory =
14436  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14437  dedicatedAllocation ||
14438  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
14439  size > preferredBlockSize / 2;
14440 
14441  if(preferDedicatedMemory &&
14442  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14443  finalCreateInfo.pool == VK_NULL_HANDLE)
14444  {
14445  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14446  }
14447 
14448  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14449  {
14450  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14451  {
14452  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14453  }
14454  else
14455  {
14456  return AllocateDedicatedMemory(
14457  size,
14458  suballocType,
14459  memTypeIndex,
14460  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14461  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14462  finalCreateInfo.pUserData,
14463  dedicatedBuffer,
14464  dedicatedImage,
14465  allocationCount,
14466  pAllocations);
14467  }
14468  }
14469  else
14470  {
14471  VkResult res = blockVector->Allocate(
14472  m_CurrentFrameIndex.load(),
14473  size,
14474  alignment,
14475  finalCreateInfo,
14476  suballocType,
14477  allocationCount,
14478  pAllocations);
14479  if(res == VK_SUCCESS)
14480  {
14481  return res;
14482  }
14483 
14484  // Try dedicated memory.
14485  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14486  {
14487  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14488  }
14489  else
14490  {
14491  res = AllocateDedicatedMemory(
14492  size,
14493  suballocType,
14494  memTypeIndex,
14495  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14496  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14497  finalCreateInfo.pUserData,
14498  dedicatedBuffer,
14499  dedicatedImage,
14500  allocationCount,
14501  pAllocations);
14502  if(res == VK_SUCCESS)
14503  {
14504  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
14505  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14506  return VK_SUCCESS;
14507  }
14508  else
14509  {
14510  // Everything failed: Return error code.
14511  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14512  return res;
14513  }
14514  }
14515  }
14516 }
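/*
Consequence of the preferDedicatedMemory heuristic above, sketched with the default
256 MiB preferred block size: a single 200 MiB request (more than half a block) is
promoted to a dedicated VkDeviceMemory allocation even without
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, unless
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is set or the allocation targets a custom pool.
*/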
14517 
14518 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14519  VkDeviceSize size,
14520  VmaSuballocationType suballocType,
14521  uint32_t memTypeIndex,
14522  bool map,
14523  bool isUserDataString,
14524  void* pUserData,
14525  VkBuffer dedicatedBuffer,
14526  VkImage dedicatedImage,
14527  size_t allocationCount,
14528  VmaAllocation* pAllocations)
14529 {
14530  VMA_ASSERT(allocationCount > 0 && pAllocations);
14531 
14532  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14533  allocInfo.memoryTypeIndex = memTypeIndex;
14534  allocInfo.allocationSize = size;
14535 
14536 #if VMA_DEDICATED_ALLOCATION
14537  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14538  if(m_UseKhrDedicatedAllocation)
14539  {
14540  if(dedicatedBuffer != VK_NULL_HANDLE)
14541  {
14542  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14543  dedicatedAllocInfo.buffer = dedicatedBuffer;
14544  allocInfo.pNext = &dedicatedAllocInfo;
14545  }
14546  else if(dedicatedImage != VK_NULL_HANDLE)
14547  {
14548  dedicatedAllocInfo.image = dedicatedImage;
14549  allocInfo.pNext = &dedicatedAllocInfo;
14550  }
14551  }
14552 #endif // #if VMA_DEDICATED_ALLOCATION
14553 
14554  size_t allocIndex;
14555  VkResult res = VK_SUCCESS;
14556  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14557  {
14558  res = AllocateDedicatedMemoryPage(
14559  size,
14560  suballocType,
14561  memTypeIndex,
14562  allocInfo,
14563  map,
14564  isUserDataString,
14565  pUserData,
14566  pAllocations + allocIndex);
14567  if(res != VK_SUCCESS)
14568  {
14569  break;
14570  }
14571  }
14572 
14573  if(res == VK_SUCCESS)
14574  {
14575  // Register them in m_pDedicatedAllocations.
14576  {
14577  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14578  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14579  VMA_ASSERT(pDedicatedAllocations);
14580  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14581  {
14582  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14583  }
14584  }
14585 
14586  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14587  }
14588  else
14589  {
14590  // Free all already created allocations.
14591  while(allocIndex--)
14592  {
14593  VmaAllocation currAlloc = pAllocations[allocIndex];
14594  VkDeviceMemory hMemory = currAlloc->GetMemory();
14595 
14596  /*
14597  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14598  before vkFreeMemory.
14599 
14600  if(currAlloc->GetMappedData() != VMA_NULL)
14601  {
14602  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14603  }
14604  */
14605 
14606  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14607 
14608  currAlloc->SetUserData(this, VMA_NULL);
14609  currAlloc->Dtor();
14610  m_AllocationObjectAllocator.Free(currAlloc);
14611  }
14612 
14613  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14614  }
14615 
14616  return res;
14617 }
14618 
14619 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14620  VkDeviceSize size,
14621  VmaSuballocationType suballocType,
14622  uint32_t memTypeIndex,
14623  const VkMemoryAllocateInfo& allocInfo,
14624  bool map,
14625  bool isUserDataString,
14626  void* pUserData,
14627  VmaAllocation* pAllocation)
14628 {
14629  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14630  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14631  if(res < 0)
14632  {
14633  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14634  return res;
14635  }
14636 
14637  void* pMappedData = VMA_NULL;
14638  if(map)
14639  {
14640  res = (*m_VulkanFunctions.vkMapMemory)(
14641  m_hDevice,
14642  hMemory,
14643  0,
14644  VK_WHOLE_SIZE,
14645  0,
14646  &pMappedData);
14647  if(res < 0)
14648  {
14649  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14650  FreeVulkanMemory(memTypeIndex, size, hMemory);
14651  return res;
14652  }
14653  }
14654 
14655  *pAllocation = m_AllocationObjectAllocator.Allocate();
14656  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
14657  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14658  (*pAllocation)->SetUserData(this, pUserData);
14659  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14660  {
14661  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14662  }
14663 
14664  return VK_SUCCESS;
14665 }
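/*
Caller-side sketch exercising the `map` path above (persistently mapped dedicated
memory; variable names hypothetical):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT |
        VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
    // allocInfo.pMappedData remains valid for the allocation's whole lifetime.
*/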
14666 
14667 void VmaAllocator_T::GetBufferMemoryRequirements(
14668  VkBuffer hBuffer,
14669  VkMemoryRequirements& memReq,
14670  bool& requiresDedicatedAllocation,
14671  bool& prefersDedicatedAllocation) const
14672 {
14673 #if VMA_DEDICATED_ALLOCATION
14674  if(m_UseKhrDedicatedAllocation)
14675  {
14676  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14677  memReqInfo.buffer = hBuffer;
14678 
14679  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14680 
14681  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14682  memReq2.pNext = &memDedicatedReq;
14683 
14684  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14685 
14686  memReq = memReq2.memoryRequirements;
14687  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14688  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14689  }
14690  else
14691 #endif // #if VMA_DEDICATED_ALLOCATION
14692  {
14693  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14694  requiresDedicatedAllocation = false;
14695  prefersDedicatedAllocation = false;
14696  }
14697 }
14698 
14699 void VmaAllocator_T::GetImageMemoryRequirements(
14700  VkImage hImage,
14701  VkMemoryRequirements& memReq,
14702  bool& requiresDedicatedAllocation,
14703  bool& prefersDedicatedAllocation) const
14704 {
14705 #if VMA_DEDICATED_ALLOCATION
14706  if(m_UseKhrDedicatedAllocation)
14707  {
14708  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14709  memReqInfo.image = hImage;
14710 
14711  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14712 
14713  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14714  memReq2.pNext = &memDedicatedReq;
14715 
14716  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14717 
14718  memReq = memReq2.memoryRequirements;
14719  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14720  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14721  }
14722  else
14723 #endif // #if VMA_DEDICATED_ALLOCATION
14724  {
14725  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14726  requiresDedicatedAllocation = false;
14727  prefersDedicatedAllocation = false;
14728  }
14729 }
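/*
Both query functions above take the ...2KHR branch only when the allocator opted in to
dedicated allocations; a sketch of opting in (the application must also enable
VK_KHR_get_memory_requirements2 and VK_KHR_dedicated_allocation on the VkDevice):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.flags = VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
*/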
14730 
14731 VkResult VmaAllocator_T::AllocateMemory(
14732  const VkMemoryRequirements& vkMemReq,
14733  bool requiresDedicatedAllocation,
14734  bool prefersDedicatedAllocation,
14735  VkBuffer dedicatedBuffer,
14736  VkImage dedicatedImage,
14737  const VmaAllocationCreateInfo& createInfo,
14738  VmaSuballocationType suballocType,
14739  size_t allocationCount,
14740  VmaAllocation* pAllocations)
14741 {
14742  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14743 
14744  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14745 
14746  if(vkMemReq.size == 0)
14747  {
14748  return VK_ERROR_VALIDATION_FAILED_EXT;
14749  }
14750  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14751  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14752  {
14753  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14754  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14755  }
14756  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14757  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14758  {
14759  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14760  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14761  }
14762  if(requiresDedicatedAllocation)
14763  {
14764  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14765  {
14766  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14767  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14768  }
14769  if(createInfo.pool != VK_NULL_HANDLE)
14770  {
14771  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14772  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14773  }
14774  }
14775  if((createInfo.pool != VK_NULL_HANDLE) &&
14776  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14777  {
14778  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14779  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14780  }
14781 
14782  if(createInfo.pool != VK_NULL_HANDLE)
14783  {
14784  const VkDeviceSize alignmentForPool = VMA_MAX(
14785  vkMemReq.alignment,
14786  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14787  return createInfo.pool->m_BlockVector.Allocate(
14788  m_CurrentFrameIndex.load(),
14789  vkMemReq.size,
14790  alignmentForPool,
14791  createInfo,
14792  suballocType,
14793  allocationCount,
14794  pAllocations);
14795  }
14796  else
14797  {
14798  // Bit mask of Vulkan memory types acceptable for this allocation.
14799  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14800  uint32_t memTypeIndex = UINT32_MAX;
14801  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14802  if(res == VK_SUCCESS)
14803  {
14804  VkDeviceSize alignmentForMemType = VMA_MAX(
14805  vkMemReq.alignment,
14806  GetMemoryTypeMinAlignment(memTypeIndex));
14807 
14808  res = AllocateMemoryOfType(
14809  vkMemReq.size,
14810  alignmentForMemType,
14811  requiresDedicatedAllocation || prefersDedicatedAllocation,
14812  dedicatedBuffer,
14813  dedicatedImage,
14814  createInfo,
14815  memTypeIndex,
14816  suballocType,
14817  allocationCount,
14818  pAllocations);
14819  // Succeeded on first try.
14820  if(res == VK_SUCCESS)
14821  {
14822  return res;
14823  }
14824  // Allocation from this memory type failed. Try other compatible memory types.
14825  else
14826  {
14827  for(;;)
14828  {
14829  // Remove old memTypeIndex from list of possibilities.
14830  memoryTypeBits &= ~(1u << memTypeIndex);
14831  // Find alternative memTypeIndex.
14832  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14833  if(res == VK_SUCCESS)
14834  {
14835  alignmentForMemType = VMA_MAX(
14836  vkMemReq.alignment,
14837  GetMemoryTypeMinAlignment(memTypeIndex));
14838 
14839  res = AllocateMemoryOfType(
14840  vkMemReq.size,
14841  alignmentForMemType,
14842  requiresDedicatedAllocation || prefersDedicatedAllocation,
14843  dedicatedBuffer,
14844  dedicatedImage,
14845  createInfo,
14846  memTypeIndex,
14847  suballocType,
14848  allocationCount,
14849  pAllocations);
14850  // Allocation from this alternative memory type succeeded.
14851  if(res == VK_SUCCESS)
14852  {
14853  return res;
14854  }
14855  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14856  }
14857  // No other matching memory type index could be found.
14858  else
14859  {
14860  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14861  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14862  }
14863  }
14864  }
14865  }
14866  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14867  else
14868  return res;
14869  }
14870 }
14871 
14872 void VmaAllocator_T::FreeMemory(
14873  size_t allocationCount,
14874  const VmaAllocation* pAllocations)
14875 {
14876  VMA_ASSERT(pAllocations);
14877 
14878  for(size_t allocIndex = allocationCount; allocIndex--; )
14879  {
14880  VmaAllocation allocation = pAllocations[allocIndex];
14881 
14882  if(allocation != VK_NULL_HANDLE)
14883  {
14884  if(TouchAllocation(allocation))
14885  {
14886  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14887  {
14888  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14889  }
14890 
14891  switch(allocation->GetType())
14892  {
14893  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14894  {
14895  VmaBlockVector* pBlockVector = VMA_NULL;
14896  VmaPool hPool = allocation->GetBlock()->GetParentPool();
14897  if(hPool != VK_NULL_HANDLE)
14898  {
14899  pBlockVector = &hPool->m_BlockVector;
14900  }
14901  else
14902  {
14903  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14904  pBlockVector = m_pBlockVectors[memTypeIndex];
14905  }
14906  pBlockVector->Free(allocation);
14907  }
14908  break;
14909  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14910  FreeDedicatedMemory(allocation);
14911  break;
14912  default:
14913  VMA_ASSERT(0);
14914  }
14915  }
14916 
14917  allocation->SetUserData(this, VMA_NULL);
14918  allocation->Dtor();
14919  m_AllocationObjectAllocator.Free(allocation);
14920  }
14921  }
14922 }
14923 
14924 VkResult VmaAllocator_T::ResizeAllocation(
14925  const VmaAllocation alloc,
14926  VkDeviceSize newSize)
14927 {
14928  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14929  {
14930  return VK_ERROR_VALIDATION_FAILED_EXT;
14931  }
14932  if(newSize == alloc->GetSize())
14933  {
14934  return VK_SUCCESS;
14935  }
14936 
14937  switch(alloc->GetType())
14938  {
14939  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14940  return VK_ERROR_FEATURE_NOT_PRESENT;
14941  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14942  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14943  {
14944  alloc->ChangeSize(newSize);
14945  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14946  return VK_SUCCESS;
14947  }
14948  else
14949  {
14950  return VK_ERROR_OUT_OF_POOL_MEMORY;
14951  }
14952  default:
14953  VMA_ASSERT(0);
14954  return VK_ERROR_VALIDATION_FAILED_EXT;
14955  }
14956 }
14957 
14958 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14959 {
14960  // Initialize.
14961  InitStatInfo(pStats->total);
14962  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14963  InitStatInfo(pStats->memoryType[i]);
14964  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14965  InitStatInfo(pStats->memoryHeap[i]);
14966 
14967  // Process default pools.
14968  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14969  {
14970  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14971  VMA_ASSERT(pBlockVector);
14972  pBlockVector->AddStats(pStats);
14973  }
14974 
14975  // Process custom pools.
14976  {
14977  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14978  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14979  {
14980  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14981  }
14982  }
14983 
14984  // Process dedicated allocations.
14985  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14986  {
14987  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14988  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14989  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14990  VMA_ASSERT(pDedicatedAllocVector);
14991  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14992  {
14993  VmaStatInfo allocationStatInfo;
14994  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14995  VmaAddStatInfo(pStats->total, allocationStatInfo);
14996  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14997  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14998  }
14999  }
15000 
15001  // Postprocess.
15002  VmaPostprocessCalcStatInfo(pStats->total);
15003  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15004  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15005  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15006  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15007 }
15008 
15009 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
15010 
15011 VkResult VmaAllocator_T::DefragmentationBegin(
15012  const VmaDefragmentationInfo2& info,
15013  VmaDefragmentationStats* pStats,
15014  VmaDefragmentationContext* pContext)
15015 {
15016  if(info.pAllocationsChanged != VMA_NULL)
15017  {
15018  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15019  }
15020 
15021  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15022  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15023 
15024  (*pContext)->AddPools(info.poolCount, info.pPools);
15025  (*pContext)->AddAllocations(
15026  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
15027 
15028  VkResult res = (*pContext)->Defragment(
15029  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
15030  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
15031  info.commandBuffer, pStats);
15032 
15033  if(res != VK_NOT_READY)
15034  {
15035  vma_delete(this, *pContext);
15036  *pContext = VMA_NULL;
15037  }
15038 
15039  return res;
15040 }
15041 
15042 VkResult VmaAllocator_T::DefragmentationEnd(
15043  VmaDefragmentationContext context)
15044 {
15045  vma_delete(this, context);
15046  return VK_SUCCESS;
15047 }
15048 
15049 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15050 {
15051  if(hAllocation->CanBecomeLost())
15052  {
15053  /*
15054  Warning: This is a carefully designed algorithm.
15055  Do not modify unless you really know what you're doing :)
15056  */
15057  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15058  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15059  for(;;)
15060  {
15061  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15062  {
15063  pAllocationInfo->memoryType = UINT32_MAX;
15064  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15065  pAllocationInfo->offset = 0;
15066  pAllocationInfo->size = hAllocation->GetSize();
15067  pAllocationInfo->pMappedData = VMA_NULL;
15068  pAllocationInfo->pUserData = hAllocation->GetUserData();
15069  return;
15070  }
15071  else if(localLastUseFrameIndex == localCurrFrameIndex)
15072  {
15073  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15074  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15075  pAllocationInfo->offset = hAllocation->GetOffset();
15076  pAllocationInfo->size = hAllocation->GetSize();
15077  pAllocationInfo->pMappedData = VMA_NULL;
15078  pAllocationInfo->pUserData = hAllocation->GetUserData();
15079  return;
15080  }
15081  else // Last use time earlier than current time.
15082  {
15083  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15084  {
15085  localLastUseFrameIndex = localCurrFrameIndex;
15086  }
15087  }
15088  }
15089  }
15090  else
15091  {
15092 #if VMA_STATS_STRING_ENABLED
15093  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15094  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15095  for(;;)
15096  {
15097  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15098  if(localLastUseFrameIndex == localCurrFrameIndex)
15099  {
15100  break;
15101  }
15102  else // Last use time earlier than current time.
15103  {
15104  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15105  {
15106  localLastUseFrameIndex = localCurrFrameIndex;
15107  }
15108  }
15109  }
15110 #endif
15111 
15112  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15113  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15114  pAllocationInfo->offset = hAllocation->GetOffset();
15115  pAllocationInfo->size = hAllocation->GetSize();
15116  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15117  pAllocationInfo->pUserData = hAllocation->GetUserData();
15118  }
15119 }
15120 
15121 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15122 {
15123  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15124  if(hAllocation->CanBecomeLost())
15125  {
15126  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15127  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15128  for(;;)
15129  {
15130  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15131  {
15132  return false;
15133  }
15134  else if(localLastUseFrameIndex == localCurrFrameIndex)
15135  {
15136  return true;
15137  }
15138  else // Last use time earlier than current time.
15139  {
15140  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15141  {
15142  localLastUseFrameIndex = localCurrFrameIndex;
15143  }
15144  }
15145  }
15146  }
15147  else
15148  {
15149 #if VMA_STATS_STRING_ENABLED
15150  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15151  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15152  for(;;)
15153  {
15154  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15155  if(localLastUseFrameIndex == localCurrFrameIndex)
15156  {
15157  break;
15158  }
15159  else // Last use time earlier than current time.
15160  {
15161  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15162  {
15163  localLastUseFrameIndex = localCurrFrameIndex;
15164  }
15165  }
15166  }
15167 #endif
15168 
15169  return true;
15170  }
15171 }
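/*
A brief summary (derived from the compare-exchange loops above) of the
lost-allocation protocol: the application advances the frame index once per
frame via vmaSetCurrentFrameIndex(); GetAllocationInfo/TouchAllocation then
atomically bump the allocation's lastUseFrameIndex up to the current frame,
unless it was already marked VMA_FRAME_INDEX_LOST, in which case the
allocation stays lost and TouchAllocation returns false.
*/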
15172 
15173 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15174 {
15175  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15176 
15177  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15178 
15179  if(newCreateInfo.maxBlockCount == 0)
15180  {
15181  newCreateInfo.maxBlockCount = SIZE_MAX;
15182  }
15183  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15184  {
15185  return VK_ERROR_INITIALIZATION_FAILED;
15186  }
15187 
15188  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15189 
15190  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15191 
15192  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15193  if(res != VK_SUCCESS)
15194  {
15195  vma_delete(this, *pPool);
15196  *pPool = VMA_NULL;
15197  return res;
15198  }
15199 
15200  // Add to m_Pools.
15201  {
15202  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15203  (*pPool)->SetId(m_NextPoolId++);
15204  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15205  }
15206 
15207  return VK_SUCCESS;
15208 }
15209 
15210 void VmaAllocator_T::DestroyPool(VmaPool pool)
15211 {
15212  // Remove from m_Pools.
15213  {
15214  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15215  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15216  VMA_ASSERT(success && "Pool not found in Allocator.");
15217  }
15218 
15219  vma_delete(this, pool);
15220 }
15221 
15222 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15223 {
15224  pool->m_BlockVector.GetPoolStats(pPoolStats);
15225 }
15226 
15227 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15228 {
15229  m_CurrentFrameIndex.store(frameIndex);
15230 }
15231 
15232 void VmaAllocator_T::MakePoolAllocationsLost(
15233  VmaPool hPool,
15234  size_t* pLostAllocationCount)
15235 {
15236  hPool->m_BlockVector.MakePoolAllocationsLost(
15237  m_CurrentFrameIndex.load(),
15238  pLostAllocationCount);
15239 }
15240 
15241 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15242 {
15243  return hPool->m_BlockVector.CheckCorruption();
15244 }
15245 
15246 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15247 {
15248  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15249 
15250  // Process default pools.
15251  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15252  {
15253  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15254  {
15255  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15256  VMA_ASSERT(pBlockVector);
15257  VkResult localRes = pBlockVector->CheckCorruption();
15258  switch(localRes)
15259  {
15260  case VK_ERROR_FEATURE_NOT_PRESENT:
15261  break;
15262  case VK_SUCCESS:
15263  finalRes = VK_SUCCESS;
15264  break;
15265  default:
15266  return localRes;
15267  }
15268  }
15269  }
15270 
15271  // Process custom pools.
15272  {
15273  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15274  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15275  {
15276  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15277  {
15278  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15279  switch(localRes)
15280  {
15281  case VK_ERROR_FEATURE_NOT_PRESENT:
15282  break;
15283  case VK_SUCCESS:
15284  finalRes = VK_SUCCESS;
15285  break;
15286  default:
15287  return localRes;
15288  }
15289  }
15290  }
15291  }
15292 
15293  return finalRes;
15294 }
15295 
15296 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15297 {
15298  *pAllocation = m_AllocationObjectAllocator.Allocate();
15299  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15300  (*pAllocation)->InitLost();
15301 }
15302 
15303 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15304 {
15305  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15306 
15307  VkResult res;
15308  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15309  {
15310  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15311  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15312  {
15313  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15314  if(res == VK_SUCCESS)
15315  {
15316  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15317  }
15318  }
15319  else
15320  {
15321  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15322  }
15323  }
15324  else
15325  {
15326  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15327  }
15328 
15329  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15330  {
15331  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15332  }
15333 
15334  return res;
15335 }
15336 
15337 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15338 {
15339  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15340  {
15341  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15342  }
15343 
15344  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15345 
15346  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15347  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15348  {
15349  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15350  m_HeapSizeLimit[heapIndex] += size;
15351  }
15352 }
15353 
15354 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15355 {
15356  if(hAllocation->CanBecomeLost())
15357  {
15358  return VK_ERROR_MEMORY_MAP_FAILED;
15359  }
15360 
15361  switch(hAllocation->GetType())
15362  {
15363  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15364  {
15365  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15366  char *pBytes = VMA_NULL;
15367  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15368  if(res == VK_SUCCESS)
15369  {
15370  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15371  hAllocation->BlockAllocMap();
15372  }
15373  return res;
15374  }
15375  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15376  return hAllocation->DedicatedAllocMap(this, ppData);
15377  default:
15378  VMA_ASSERT(0);
15379  return VK_ERROR_MEMORY_MAP_FAILED;
15380  }
15381 }
15382 
15383 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15384 {
15385  switch(hAllocation->GetType())
15386  {
15387  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15388  {
15389  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15390  hAllocation->BlockAllocUnmap();
15391  pBlock->Unmap(this, 1);
15392  }
15393  break;
15394  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15395  hAllocation->DedicatedAllocUnmap(this);
15396  break;
15397  default:
15398  VMA_ASSERT(0);
15399  }
15400 }
15401 
15402 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15403 {
15404  VkResult res = VK_SUCCESS;
15405  switch(hAllocation->GetType())
15406  {
15407  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15408  res = GetVulkanFunctions().vkBindBufferMemory(
15409  m_hDevice,
15410  hBuffer,
15411  hAllocation->GetMemory(),
15412  0); //memoryOffset
15413  break;
15414  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15415  {
15416  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15417  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15418  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15419  break;
15420  }
15421  default:
15422  VMA_ASSERT(0);
15423  }
15424  return res;
15425 }
15426 
15427 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15428 {
15429  VkResult res = VK_SUCCESS;
15430  switch(hAllocation->GetType())
15431  {
15432  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15433  res = GetVulkanFunctions().vkBindImageMemory(
15434  m_hDevice,
15435  hImage,
15436  hAllocation->GetMemory(),
15437  0); //memoryOffset
15438  break;
15439  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15440  {
15441  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15442  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15443  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15444  break;
15445  }
15446  default:
15447  VMA_ASSERT(0);
15448  }
15449  return res;
15450 }
15451 
15452 void VmaAllocator_T::FlushOrInvalidateAllocation(
15453  VmaAllocation hAllocation,
15454  VkDeviceSize offset, VkDeviceSize size,
15455  VMA_CACHE_OPERATION op)
15456 {
15457  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15458  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15459  {
15460  const VkDeviceSize allocationSize = hAllocation->GetSize();
15461  VMA_ASSERT(offset <= allocationSize);
15462 
15463  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15464 
15465  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15466  memRange.memory = hAllocation->GetMemory();
15467 
15468  switch(hAllocation->GetType())
15469  {
15470  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15471  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15472  if(size == VK_WHOLE_SIZE)
15473  {
15474  memRange.size = allocationSize - memRange.offset;
15475  }
15476  else
15477  {
15478  VMA_ASSERT(offset + size <= allocationSize);
15479  memRange.size = VMA_MIN(
15480  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15481  allocationSize - memRange.offset);
15482  }
15483  break;
15484 
15485  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15486  {
15487  // 1. Still within this allocation.
15488  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15489  if(size == VK_WHOLE_SIZE)
15490  {
15491  size = allocationSize - offset;
15492  }
15493  else
15494  {
15495  VMA_ASSERT(offset + size <= allocationSize);
15496  }
15497  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15498 
15499  // 2. Adjust to whole block.
15500  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15501  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15502  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15503  memRange.offset += allocationOffset;
15504  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15505 
15506  break;
15507  }
15508 
15509  default:
15510  VMA_ASSERT(0);
15511  }
15512 
15513  switch(op)
15514  {
15515  case VMA_CACHE_FLUSH:
15516  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15517  break;
15518  case VMA_CACHE_INVALIDATE:
15519  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15520  break;
15521  default:
15522  VMA_ASSERT(0);
15523  }
15524  }
15525  // else: Just ignore this call.
15526 }
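/*
A worked instance of the alignment math above, assuming nonCoherentAtomSize = 64
and a dedicated allocation of size 512, flushed with offset = 100, size = 200:

    memRange.offset = VmaAlignDown(100, 64)            = 64
    memRange.size   = VmaAlignUp(200 + (100 - 64), 64)
                    = VmaAlignUp(236, 64)              = 256

The resulting range [64, 320) fully covers the requested [100, 300), as the
Vulkan rules for flushing/invalidating non-coherent memory require.
*/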
15527 
15528 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15529 {
15530  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15531 
15532  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15533  {
15534  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15535  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15536  VMA_ASSERT(pDedicatedAllocations);
15537  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15538  VMA_ASSERT(success);
15539  }
15540 
15541  VkDeviceMemory hMemory = allocation->GetMemory();
15542 
15543  /*
15544  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15545  before vkFreeMemory.
15546 
15547  if(allocation->GetMappedData() != VMA_NULL)
15548  {
15549  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15550  }
15551  */
15552 
15553  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15554 
15555  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15556 }
15557 
15558 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15559 {
15560  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15561  !hAllocation->CanBecomeLost() &&
15562  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15563  {
15564  void* pData = VMA_NULL;
15565  VkResult res = Map(hAllocation, &pData);
15566  if(res == VK_SUCCESS)
15567  {
15568  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15569  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15570  Unmap(hAllocation);
15571  }
15572  else
15573  {
15574  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15575  }
15576  }
15577 }
15578 
15579 #if VMA_STATS_STRING_ENABLED
15580 
15581 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15582 {
15583  bool dedicatedAllocationsStarted = false;
15584  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15585  {
15586  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15587  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15588  VMA_ASSERT(pDedicatedAllocVector);
15589  if(pDedicatedAllocVector->empty() == false)
15590  {
15591  if(dedicatedAllocationsStarted == false)
15592  {
15593  dedicatedAllocationsStarted = true;
15594  json.WriteString("DedicatedAllocations");
15595  json.BeginObject();
15596  }
15597 
15598  json.BeginString("Type ");
15599  json.ContinueString(memTypeIndex);
15600  json.EndString();
15601 
15602  json.BeginArray();
15603 
15604  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15605  {
15606  json.BeginObject(true);
15607  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15608  hAlloc->PrintParameters(json);
15609  json.EndObject();
15610  }
15611 
15612  json.EndArray();
15613  }
15614  }
15615  if(dedicatedAllocationsStarted)
15616  {
15617  json.EndObject();
15618  }
15619 
15620  {
15621  bool allocationsStarted = false;
15622  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15623  {
15624  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15625  {
15626  if(allocationsStarted == false)
15627  {
15628  allocationsStarted = true;
15629  json.WriteString("DefaultPools");
15630  json.BeginObject();
15631  }
15632 
15633  json.BeginString("Type ");
15634  json.ContinueString(memTypeIndex);
15635  json.EndString();
15636 
15637  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15638  }
15639  }
15640  if(allocationsStarted)
15641  {
15642  json.EndObject();
15643  }
15644  }
15645 
15646  // Custom pools
15647  {
15648  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15649  const size_t poolCount = m_Pools.size();
15650  if(poolCount > 0)
15651  {
15652  json.WriteString("Pools");
15653  json.BeginObject();
15654  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15655  {
15656  json.BeginString();
15657  json.ContinueString(m_Pools[poolIndex]->GetId());
15658  json.EndString();
15659 
15660  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15661  }
15662  json.EndObject();
15663  }
15664  }
15665 }
15666 
15667 #endif // #if VMA_STATS_STRING_ENABLED
15668 
15669 ////////////////////////////////////////////////////////////////////////////////
15670 // Public interface
15671 
15672 VkResult vmaCreateAllocator(
15673  const VmaAllocatorCreateInfo* pCreateInfo,
15674  VmaAllocator* pAllocator)
15675 {
15676  VMA_ASSERT(pCreateInfo && pAllocator);
15677  VMA_DEBUG_LOG("vmaCreateAllocator");
15678  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15679  return (*pAllocator)->Init(pCreateInfo);
15680 }
15681 
15682 void vmaDestroyAllocator(
15683  VmaAllocator allocator)
15684 {
15685  if(allocator != VK_NULL_HANDLE)
15686  {
15687  VMA_DEBUG_LOG("vmaDestroyAllocator");
15688  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15689  vma_delete(&allocationCallbacks, allocator);
15690  }
15691 }
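/*
A minimal usage sketch for the pair of functions above; physicalDevice and
device are placeholders for handles created elsewhere:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    if(vmaCreateAllocator(&allocatorInfo, &allocator) == VK_SUCCESS)
    {
        // ... create buffers/images and allocations ...
        vmaDestroyAllocator(allocator);
    }
*/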
15692 
15693 void vmaGetPhysicalDeviceProperties(
15694  VmaAllocator allocator,
15695  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15696 {
15697  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15698  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15699 }
15700 
15701 void vmaGetMemoryProperties(
15702  VmaAllocator allocator,
15703  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15704 {
15705  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15706  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15707 }
15708 
15709 void vmaGetMemoryTypeProperties(
15710  VmaAllocator allocator,
15711  uint32_t memoryTypeIndex,
15712  VkMemoryPropertyFlags* pFlags)
15713 {
15714  VMA_ASSERT(allocator && pFlags);
15715  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15716  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15717 }
15718 
15719 void vmaSetCurrentFrameIndex(
15720  VmaAllocator allocator,
15721  uint32_t frameIndex)
15722 {
15723  VMA_ASSERT(allocator);
15724  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15725 
15726  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15727 
15728  allocator->SetCurrentFrameIndex(frameIndex);
15729 }
15730 
15731 void vmaCalculateStats(
15732  VmaAllocator allocator,
15733  VmaStats* pStats)
15734 {
15735  VMA_ASSERT(allocator && pStats);
15736  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15737  allocator->CalculateStats(pStats);
15738 }
15739 
15740 #if VMA_STATS_STRING_ENABLED
15741 
15742 void vmaBuildStatsString(
15743  VmaAllocator allocator,
15744  char** ppStatsString,
15745  VkBool32 detailedMap)
15746 {
15747  VMA_ASSERT(allocator && ppStatsString);
15748  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15749 
15750  VmaStringBuilder sb(allocator);
15751  {
15752  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15753  json.BeginObject();
15754 
15755  VmaStats stats;
15756  allocator->CalculateStats(&stats);
15757 
15758  json.WriteString("Total");
15759  VmaPrintStatInfo(json, stats.total);
15760 
15761  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15762  {
15763  json.BeginString("Heap ");
15764  json.ContinueString(heapIndex);
15765  json.EndString();
15766  json.BeginObject();
15767 
15768  json.WriteString("Size");
15769  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15770 
15771  json.WriteString("Flags");
15772  json.BeginArray(true);
15773  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15774  {
15775  json.WriteString("DEVICE_LOCAL");
15776  }
15777  json.EndArray();
15778 
15779  if(stats.memoryHeap[heapIndex].blockCount > 0)
15780  {
15781  json.WriteString("Stats");
15782  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15783  }
15784 
15785  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15786  {
15787  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15788  {
15789  json.BeginString("Type ");
15790  json.ContinueString(typeIndex);
15791  json.EndString();
15792 
15793  json.BeginObject();
15794 
15795  json.WriteString("Flags");
15796  json.BeginArray(true);
15797  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15798  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15799  {
15800  json.WriteString("DEVICE_LOCAL");
15801  }
15802  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15803  {
15804  json.WriteString("HOST_VISIBLE");
15805  }
15806  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15807  {
15808  json.WriteString("HOST_COHERENT");
15809  }
15810  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15811  {
15812  json.WriteString("HOST_CACHED");
15813  }
15814  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15815  {
15816  json.WriteString("LAZILY_ALLOCATED");
15817  }
15818  json.EndArray();
15819 
15820  if(stats.memoryType[typeIndex].blockCount > 0)
15821  {
15822  json.WriteString("Stats");
15823  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15824  }
15825 
15826  json.EndObject();
15827  }
15828  }
15829 
15830  json.EndObject();
15831  }
15832  if(detailedMap == VK_TRUE)
15833  {
15834  allocator->PrintDetailedMap(json);
15835  }
15836 
15837  json.EndObject();
15838  }
15839 
15840  const size_t len = sb.GetLength();
15841  char* const pChars = vma_new_array(allocator, char, len + 1);
15842  if(len > 0)
15843  {
15844  memcpy(pChars, sb.GetData(), len);
15845  }
15846  pChars[len] = '\0';
15847  *ppStatsString = pChars;
15848 }
15849 
15850 void vmaFreeStatsString(
15851  VmaAllocator allocator,
15852  char* pStatsString)
15853 {
15854  if(pStatsString != VMA_NULL)
15855  {
15856  VMA_ASSERT(allocator);
15857  size_t len = strlen(pStatsString);
15858  vma_delete_array(allocator, pStatsString, len + 1);
15859  }
15860 }
15861 
15862 #endif // #if VMA_STATS_STRING_ENABLED
15863 
15864 /*
15865 This function is not protected by any mutex because it just reads immutable data.
15866 */
15867 VkResult vmaFindMemoryTypeIndex(
15868  VmaAllocator allocator,
15869  uint32_t memoryTypeBits,
15870  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15871  uint32_t* pMemoryTypeIndex)
15872 {
15873  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15874  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15875  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15876 
15877  if(pAllocationCreateInfo->memoryTypeBits != 0)
15878  {
15879  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15880  }
15881 
15882  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15883  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15884 
15885  // Convert usage to requiredFlags and preferredFlags.
15886  switch(pAllocationCreateInfo->usage)
15887  {
15888  case VMA_MEMORY_USAGE_UNKNOWN:
15889  break;
15890  case VMA_MEMORY_USAGE_GPU_ONLY:
15891  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15892  {
15893  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15894  }
15895  break;
15896  case VMA_MEMORY_USAGE_CPU_ONLY:
15897  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15898  break;
15899  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15900  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15901  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15902  {
15903  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15904  }
15905  break;
15906  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15907  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15908  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15909  break;
15910  default:
15911  break;
15912  }
15913 
15914  *pMemoryTypeIndex = UINT32_MAX;
15915  uint32_t minCost = UINT32_MAX;
15916  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15917  memTypeIndex < allocator->GetMemoryTypeCount();
15918  ++memTypeIndex, memTypeBit <<= 1)
15919  {
15920  // This memory type is acceptable according to memoryTypeBits bitmask.
15921  if((memTypeBit & memoryTypeBits) != 0)
15922  {
15923  const VkMemoryPropertyFlags currFlags =
15924  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15925  // This memory type contains requiredFlags.
15926  if((requiredFlags & ~currFlags) == 0)
15927  {
15928  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15929  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15930  // Remember memory type with lowest cost.
15931  if(currCost < minCost)
15932  {
15933  *pMemoryTypeIndex = memTypeIndex;
15934  if(currCost == 0)
15935  {
15936  return VK_SUCCESS;
15937  }
15938  minCost = currCost;
15939  }
15940  }
15941  }
15942  }
15943  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15944 }
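/*
A minimal sketch of driving the cost-based search above, assuming a valid
allocator and that host-visible staging memory is wanted:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
    // On VK_SUCCESS, memTypeIndex names the type that contains all requiredFlags
    // and misses the fewest preferredFlags bits.
*/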
15945 
15946 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15947  VmaAllocator allocator,
15948  const VkBufferCreateInfo* pBufferCreateInfo,
15949  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15950  uint32_t* pMemoryTypeIndex)
15951 {
15952  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15953  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15954  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15955  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15956 
15957  const VkDevice hDev = allocator->m_hDevice;
15958  VkBuffer hBuffer = VK_NULL_HANDLE;
15959  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15960  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15961  if(res == VK_SUCCESS)
15962  {
15963  VkMemoryRequirements memReq = {};
15964  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15965  hDev, hBuffer, &memReq);
15966 
15967  res = vmaFindMemoryTypeIndex(
15968  allocator,
15969  memReq.memoryTypeBits,
15970  pAllocationCreateInfo,
15971  pMemoryTypeIndex);
15972 
15973  allocator->GetVulkanFunctions().vkDestroyBuffer(
15974  hDev, hBuffer, allocator->GetAllocationCallbacks());
15975  }
15976  return res;
15977 }
15978 
15979 VkResult vmaFindMemoryTypeIndexForImageInfo(
15980  VmaAllocator allocator,
15981  const VkImageCreateInfo* pImageCreateInfo,
15982  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15983  uint32_t* pMemoryTypeIndex)
15984 {
15985  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15986  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15987  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15988  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15989 
15990  const VkDevice hDev = allocator->m_hDevice;
15991  VkImage hImage = VK_NULL_HANDLE;
15992  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15993  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15994  if(res == VK_SUCCESS)
15995  {
15996  VkMemoryRequirements memReq = {};
15997  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15998  hDev, hImage, &memReq);
15999 
16000  res = vmaFindMemoryTypeIndex(
16001  allocator,
16002  memReq.memoryTypeBits,
16003  pAllocationCreateInfo,
16004  pMemoryTypeIndex);
16005 
16006  allocator->GetVulkanFunctions().vkDestroyImage(
16007  hDev, hImage, allocator->GetAllocationCallbacks());
16008  }
16009  return res;
16010 }
16011 
16012 VkResult vmaCreatePool(
16013  VmaAllocator allocator,
16014  const VmaPoolCreateInfo* pCreateInfo,
16015  VmaPool* pPool)
16016 {
16017  VMA_ASSERT(allocator && pCreateInfo && pPool);
16018 
16019  VMA_DEBUG_LOG("vmaCreatePool");
16020 
16021  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16022 
16023  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16024 
16025 #if VMA_RECORDING_ENABLED
16026  if(allocator->GetRecorder() != VMA_NULL)
16027  {
16028  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16029  }
16030 #endif
16031 
16032  return res;
16033 }
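/*
A minimal sketch of creating a custom pool; memTypeIndex is a placeholder, e.g.
obtained from vmaFindMemoryTypeIndex above:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // optional fixed block size
    poolCreateInfo.maxBlockCount = 2;               // optional upper limit

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool, then:
    vmaDestroyPool(allocator, pool);
*/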
16034 
16035 void vmaDestroyPool(
16036  VmaAllocator allocator,
16037  VmaPool pool)
16038 {
16039  VMA_ASSERT(allocator);
16040 
16041  if(pool == VK_NULL_HANDLE)
16042  {
16043  return;
16044  }
16045 
16046  VMA_DEBUG_LOG("vmaDestroyPool");
16047 
16048  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16049 
16050 #if VMA_RECORDING_ENABLED
16051  if(allocator->GetRecorder() != VMA_NULL)
16052  {
16053  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16054  }
16055 #endif
16056 
16057  allocator->DestroyPool(pool);
16058 }
16059 
16060 void vmaGetPoolStats(
16061  VmaAllocator allocator,
16062  VmaPool pool,
16063  VmaPoolStats* pPoolStats)
16064 {
16065  VMA_ASSERT(allocator && pool && pPoolStats);
16066 
16067  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16068 
16069  allocator->GetPoolStats(pool, pPoolStats);
16070 }
16071 
16072 void vmaMakePoolAllocationsLost(
16073  VmaAllocator allocator,
16074  VmaPool pool,
16075  size_t* pLostAllocationCount)
16076 {
16077  VMA_ASSERT(allocator && pool);
16078 
16079  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16080 
16081 #if VMA_RECORDING_ENABLED
16082  if(allocator->GetRecorder() != VMA_NULL)
16083  {
16084  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16085  }
16086 #endif
16087 
16088  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16089 }
16090 
16091 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16092 {
16093  VMA_ASSERT(allocator && pool);
16094 
16095  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16096 
16097  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16098 
16099  return allocator->CheckPoolCorruption(pool);
16100 }
16101 
16102 VkResult vmaAllocateMemory(
16103  VmaAllocator allocator,
16104  const VkMemoryRequirements* pVkMemoryRequirements,
16105  const VmaAllocationCreateInfo* pCreateInfo,
16106  VmaAllocation* pAllocation,
16107  VmaAllocationInfo* pAllocationInfo)
16108 {
16109  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16110 
16111  VMA_DEBUG_LOG("vmaAllocateMemory");
16112 
16113  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16114 
16115  VkResult result = allocator->AllocateMemory(
16116  *pVkMemoryRequirements,
16117  false, // requiresDedicatedAllocation
16118  false, // prefersDedicatedAllocation
16119  VK_NULL_HANDLE, // dedicatedBuffer
16120  VK_NULL_HANDLE, // dedicatedImage
16121  *pCreateInfo,
16122  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16123  1, // allocationCount
16124  pAllocation);
16125 
16126 #if VMA_RECORDING_ENABLED
16127  if(allocator->GetRecorder() != VMA_NULL)
16128  {
16129  allocator->GetRecorder()->RecordAllocateMemory(
16130  allocator->GetCurrentFrameIndex(),
16131  *pVkMemoryRequirements,
16132  *pCreateInfo,
16133  *pAllocation);
16134  }
16135 #endif
16136 
16137  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16138  {
16139  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16140  }
16141 
16142  return result;
16143 }
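/*
A minimal sketch of raw allocation through the function above, assuming memReq
was filled by vkGetBufferMemoryRequirements for an existing buffer:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, &allocInfo);
    // The memory can then be bound manually, e.g. vmaBindBufferMemory(allocator, alloc, buf).
*/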
16144 
16145 VkResult vmaAllocateMemoryPages(
16146  VmaAllocator allocator,
16147  const VkMemoryRequirements* pVkMemoryRequirements,
16148  const VmaAllocationCreateInfo* pCreateInfo,
16149  size_t allocationCount,
16150  VmaAllocation* pAllocations,
16151  VmaAllocationInfo* pAllocationInfo)
16152 {
16153  if(allocationCount == 0)
16154  {
16155  return VK_SUCCESS;
16156  }
16157 
16158  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16159 
16160  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16161 
16162  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16163 
16164  VkResult result = allocator->AllocateMemory(
16165  *pVkMemoryRequirements,
16166  false, // requiresDedicatedAllocation
16167  false, // prefersDedicatedAllocation
16168  VK_NULL_HANDLE, // dedicatedBuffer
16169  VK_NULL_HANDLE, // dedicatedImage
16170  *pCreateInfo,
16171  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16172  allocationCount,
16173  pAllocations);
16174 
16175 #if VMA_RECORDING_ENABLED
16176  if(allocator->GetRecorder() != VMA_NULL)
16177  {
16178  allocator->GetRecorder()->RecordAllocateMemoryPages(
16179  allocator->GetCurrentFrameIndex(),
16180  *pVkMemoryRequirements,
16181  *pCreateInfo,
16182  (uint64_t)allocationCount,
16183  pAllocations);
16184  }
16185 #endif
16186 
16187  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16188  {
16189  for(size_t i = 0; i < allocationCount; ++i)
16190  {
16191  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16192  }
16193  }
16194 
16195  return result;
16196 }
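/*
A minimal sketch of batched allocation through the function above; memReq is a
placeholder describing each of the identical allocations, and pAllocationInfo
is left out since it is optional:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocs[8] = {};
    VkResult res = vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo, 8, allocs, VMA_NULL);
    // When done, free all of them with vmaFreeMemoryPages(allocator, 8, allocs);
*/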
16197 
16198 VkResult vmaAllocateMemoryForBuffer(
16199  VmaAllocator allocator,
16200  VkBuffer buffer,
16201  const VmaAllocationCreateInfo* pCreateInfo,
16202  VmaAllocation* pAllocation,
16203  VmaAllocationInfo* pAllocationInfo)
16204 {
16205  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16206 
16207  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16208 
16209  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16210 
16211  VkMemoryRequirements vkMemReq = {};
16212  bool requiresDedicatedAllocation = false;
16213  bool prefersDedicatedAllocation = false;
16214  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16215  requiresDedicatedAllocation,
16216  prefersDedicatedAllocation);
16217 
16218  VkResult result = allocator->AllocateMemory(
16219  vkMemReq,
16220  requiresDedicatedAllocation,
16221  prefersDedicatedAllocation,
16222  buffer, // dedicatedBuffer
16223  VK_NULL_HANDLE, // dedicatedImage
16224  *pCreateInfo,
16225  VMA_SUBALLOCATION_TYPE_BUFFER,
16226  1, // allocationCount
16227  pAllocation);
16228 
16229 #if VMA_RECORDING_ENABLED
16230  if(allocator->GetRecorder() != VMA_NULL)
16231  {
16232  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16233  allocator->GetCurrentFrameIndex(),
16234  vkMemReq,
16235  requiresDedicatedAllocation,
16236  prefersDedicatedAllocation,
16237  *pCreateInfo,
16238  *pAllocation);
16239  }
16240 #endif
16241 
16242  if(pAllocationInfo && result == VK_SUCCESS)
16243  {
16244  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16245  }
16246 
16247  return result;
16248 }
16249 
16250 VkResult vmaAllocateMemoryForImage(
16251  VmaAllocator allocator,
16252  VkImage image,
16253  const VmaAllocationCreateInfo* pCreateInfo,
16254  VmaAllocation* pAllocation,
16255  VmaAllocationInfo* pAllocationInfo)
16256 {
16257  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16258 
16259  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16260 
16261  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16262 
16263  VkMemoryRequirements vkMemReq = {};
16264  bool requiresDedicatedAllocation = false;
16265  bool prefersDedicatedAllocation = false;
16266  allocator->GetImageMemoryRequirements(image, vkMemReq,
16267  requiresDedicatedAllocation, prefersDedicatedAllocation);
16268 
16269  VkResult result = allocator->AllocateMemory(
16270  vkMemReq,
16271  requiresDedicatedAllocation,
16272  prefersDedicatedAllocation,
16273  VK_NULL_HANDLE, // dedicatedBuffer
16274  image, // dedicatedImage
16275  *pCreateInfo,
16276  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16277  1, // allocationCount
16278  pAllocation);
16279 
16280 #if VMA_RECORDING_ENABLED
16281  if(allocator->GetRecorder() != VMA_NULL)
16282  {
16283  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16284  allocator->GetCurrentFrameIndex(),
16285  vkMemReq,
16286  requiresDedicatedAllocation,
16287  prefersDedicatedAllocation,
16288  *pCreateInfo,
16289  *pAllocation);
16290  }
16291 #endif
16292 
16293  if(pAllocationInfo && result == VK_SUCCESS)
16294  {
16295  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16296  }
16297 
16298  return result;
16299 }
16300 
16301 void vmaFreeMemory(
16302  VmaAllocator allocator,
16303  VmaAllocation allocation)
16304 {
16305  VMA_ASSERT(allocator);
16306 
16307  if(allocation == VK_NULL_HANDLE)
16308  {
16309  return;
16310  }
16311 
16312  VMA_DEBUG_LOG("vmaFreeMemory");
16313 
16314  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16315 
16316 #if VMA_RECORDING_ENABLED
16317  if(allocator->GetRecorder() != VMA_NULL)
16318  {
16319  allocator->GetRecorder()->RecordFreeMemory(
16320  allocator->GetCurrentFrameIndex(),
16321  allocation);
16322  }
16323 #endif
16324 
16325  allocator->FreeMemory(
16326  1, // allocationCount
16327  &allocation);
16328 }
16329 
16330 void vmaFreeMemoryPages(
16331  VmaAllocator allocator,
16332  size_t allocationCount,
16333  VmaAllocation* pAllocations)
16334 {
16335  if(allocationCount == 0)
16336  {
16337  return;
16338  }
16339 
16340  VMA_ASSERT(allocator);
16341 
16342  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16343 
16344  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16345 
16346 #if VMA_RECORDING_ENABLED
16347  if(allocator->GetRecorder() != VMA_NULL)
16348  {
16349  allocator->GetRecorder()->RecordFreeMemoryPages(
16350  allocator->GetCurrentFrameIndex(),
16351  (uint64_t)allocationCount,
16352  pAllocations);
16353  }
16354 #endif
16355 
16356  allocator->FreeMemory(allocationCount, pAllocations);
16357 }
16358 
16359 VkResult vmaResizeAllocation(
16360  VmaAllocator allocator,
16361  VmaAllocation allocation,
16362  VkDeviceSize newSize)
16363 {
16364  VMA_ASSERT(allocator && allocation);
16365 
16366  VMA_DEBUG_LOG("vmaResizeAllocation");
16367 
16368  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16369 
16370 #if VMA_RECORDING_ENABLED
16371  if(allocator->GetRecorder() != VMA_NULL)
16372  {
16373  allocator->GetRecorder()->RecordResizeAllocation(
16374  allocator->GetCurrentFrameIndex(),
16375  allocation,
16376  newSize);
16377  }
16378 #endif
16379 
16380  return allocator->ResizeAllocation(allocation, newSize);
16381 }
16382 
16383 void vmaGetAllocationInfo(
16384  VmaAllocator allocator,
16385  VmaAllocation allocation,
16386  VmaAllocationInfo* pAllocationInfo)
16387 {
16388  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16389 
16390  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16391 
16392 #if VMA_RECORDING_ENABLED
16393  if(allocator->GetRecorder() != VMA_NULL)
16394  {
16395  allocator->GetRecorder()->RecordGetAllocationInfo(
16396  allocator->GetCurrentFrameIndex(),
16397  allocation);
16398  }
16399 #endif
16400 
16401  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16402 }
16403 
16404 VkBool32 vmaTouchAllocation(
16405  VmaAllocator allocator,
16406  VmaAllocation allocation)
16407 {
16408  VMA_ASSERT(allocator && allocation);
16409 
16410  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16411 
16412 #if VMA_RECORDING_ENABLED
16413  if(allocator->GetRecorder() != VMA_NULL)
16414  {
16415  allocator->GetRecorder()->RecordTouchAllocation(
16416  allocator->GetCurrentFrameIndex(),
16417  allocation);
16418  }
16419 #endif
16420 
16421  return allocator->TouchAllocation(allocation);
16422 }
16423 
16424 void vmaSetAllocationUserData(
16425  VmaAllocator allocator,
16426  VmaAllocation allocation,
16427  void* pUserData)
16428 {
16429  VMA_ASSERT(allocator && allocation);
16430 
16431  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16432 
16433  allocation->SetUserData(allocator, pUserData);
16434 
16435 #if VMA_RECORDING_ENABLED
16436  if(allocator->GetRecorder() != VMA_NULL)
16437  {
16438  allocator->GetRecorder()->RecordSetAllocationUserData(
16439  allocator->GetCurrentFrameIndex(),
16440  allocation,
16441  pUserData);
16442  }
16443 #endif
16444 }
16445 
16446 void vmaCreateLostAllocation(
16447  VmaAllocator allocator,
16448  VmaAllocation* pAllocation)
16449 {
16450  VMA_ASSERT(allocator && pAllocation);
16451 
16452  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16453 
16454  allocator->CreateLostAllocation(pAllocation);
16455 
16456 #if VMA_RECORDING_ENABLED
16457  if(allocator->GetRecorder() != VMA_NULL)
16458  {
16459  allocator->GetRecorder()->RecordCreateLostAllocation(
16460  allocator->GetCurrentFrameIndex(),
16461  *pAllocation);
16462  }
16463 #endif
16464 }
16465 
16466 VkResult vmaMapMemory(
16467  VmaAllocator allocator,
16468  VmaAllocation allocation,
16469  void** ppData)
16470 {
16471  VMA_ASSERT(allocator && allocation && ppData);
16472 
16473  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16474 
16475  VkResult res = allocator->Map(allocation, ppData);
16476 
16477 #if VMA_RECORDING_ENABLED
16478  if(allocator->GetRecorder() != VMA_NULL)
16479  {
16480  allocator->GetRecorder()->RecordMapMemory(
16481  allocator->GetCurrentFrameIndex(),
16482  allocation);
16483  }
16484 #endif
16485 
16486  return res;
16487 }
16488 
16489 void vmaUnmapMemory(
16490  VmaAllocator allocator,
16491  VmaAllocation allocation)
16492 {
16493  VMA_ASSERT(allocator && allocation);
16494 
16495  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16496 
16497 #if VMA_RECORDING_ENABLED
16498  if(allocator->GetRecorder() != VMA_NULL)
16499  {
16500  allocator->GetRecorder()->RecordUnmapMemory(
16501  allocator->GetCurrentFrameIndex(),
16502  allocation);
16503  }
16504 #endif
16505 
16506  allocator->Unmap(allocation);
16507 }
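/*
A minimal sketch of the map/write/unmap sequence, assuming the allocation lives
in HOST_VISIBLE (possibly non-coherent) memory and cannot become lost; srcData
and srcSize are placeholders:

    void* pData = VMA_NULL;
    if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(allocator, allocation);
    }
*/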
16508 
16509 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16510 {
16511  VMA_ASSERT(allocator && allocation);
16512 
16513  VMA_DEBUG_LOG("vmaFlushAllocation");
16514 
16515  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16516 
16517  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16518 
16519 #if VMA_RECORDING_ENABLED
16520  if(allocator->GetRecorder() != VMA_NULL)
16521  {
16522  allocator->GetRecorder()->RecordFlushAllocation(
16523  allocator->GetCurrentFrameIndex(),
16524  allocation, offset, size);
16525  }
16526 #endif
16527 }
16528 
16529 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16530 {
16531  VMA_ASSERT(allocator && allocation);
16532 
16533  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16534 
16535  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16536 
16537  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16538 
16539 #if VMA_RECORDING_ENABLED
16540  if(allocator->GetRecorder() != VMA_NULL)
16541  {
16542  allocator->GetRecorder()->RecordInvalidateAllocation(
16543  allocator->GetCurrentFrameIndex(),
16544  allocation, offset, size);
16545  }
16546 #endif
16547 }
16548 
16549 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16550 {
16551  VMA_ASSERT(allocator);
16552 
16553  VMA_DEBUG_LOG("vmaCheckCorruption");
16554 
16555  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16556 
16557  return allocator->CheckCorruption(memoryTypeBits);
16558 }
16559 
16560 VkResult vmaDefragment(
16561  VmaAllocator allocator,
16562  VmaAllocation* pAllocations,
16563  size_t allocationCount,
16564  VkBool32* pAllocationsChanged,
16565  const VmaDefragmentationInfo *pDefragmentationInfo,
16566  VmaDefragmentationStats* pDefragmentationStats)
16567 {
16568  // Deprecated interface, reimplemented using the new one.
16569 
16570  VmaDefragmentationInfo2 info2 = {};
16571  info2.allocationCount = (uint32_t)allocationCount;
16572  info2.pAllocations = pAllocations;
16573  info2.pAllocationsChanged = pAllocationsChanged;
16574  if(pDefragmentationInfo != VMA_NULL)
16575  {
16576  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16577  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16578  }
16579  else
16580  {
16581  info2.maxCpuAllocationsToMove = UINT32_MAX;
16582  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16583  }
16584  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16585 
16586  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
16587  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16588  if(res == VK_NOT_READY)
16589  {
16590  res = vmaDefragmentationEnd(allocator, ctx);
16591  }
16592  return res;
16593 }
16594 
16595 VkResult vmaDefragmentationBegin(
16596  VmaAllocator allocator,
16597  const VmaDefragmentationInfo2* pInfo,
16598  VmaDefragmentationStats* pStats,
16599  VmaDefragmentationContext *pContext)
16600 {
16601  VMA_ASSERT(allocator && pInfo && pContext);
16602 
16603  // Degenerate case: Nothing to defragment.
16604  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16605  {
16606  return VK_SUCCESS;
16607  }
16608 
16609  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16610  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16611  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16612  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16613 
16614  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16615 
16616  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16617 
16618  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16619 
16620 #if VMA_RECORDING_ENABLED
16621  if(allocator->GetRecorder() != VMA_NULL)
16622  {
16623  allocator->GetRecorder()->RecordDefragmentationBegin(
16624  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16625  }
16626 #endif
16627 
16628  return res;
16629 }
16630 
16631 VkResult vmaDefragmentationEnd(
16632  VmaAllocator allocator,
16633  VmaDefragmentationContext context)
16634 {
16635  VMA_ASSERT(allocator);
16636 
16637  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16638 
16639  if(context != VK_NULL_HANDLE)
16640  {
16641  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16642 
16643 #if VMA_RECORDING_ENABLED
16644  if(allocator->GetRecorder() != VMA_NULL)
16645  {
16646  allocator->GetRecorder()->RecordDefragmentationEnd(
16647  allocator->GetCurrentFrameIndex(), context);
16648  }
16649 #endif
16650 
16651  return allocator->DefragmentationEnd(context);
16652  }
16653  else
16654  {
16655  return VK_SUCCESS;
16656  }
16657 }
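/*
A minimal CPU-only sketch of the begin/end pair above; per the documentation it
is only legal to defragment allocations bound to buffers, so allocs is assumed
to hold buffer-bound allocations whose buffers are destroyed before and
recreated after the move. allocCount and changed are placeholders:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.pAllocationsChanged = changed;      // optional VkBool32 array
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &ctx);
    vmaDefragmentationEnd(allocator, ctx); // safe even if ctx is VK_NULL_HANDLE
    // Recreate VkBuffer objects whose entry in changed[] is VK_TRUE and re-bind
    // them with vmaBindBufferMemory.
*/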
16658 
16659 VkResult vmaBindBufferMemory(
16660  VmaAllocator allocator,
16661  VmaAllocation allocation,
16662  VkBuffer buffer)
16663 {
16664  VMA_ASSERT(allocator && allocation && buffer);
16665 
16666  VMA_DEBUG_LOG("vmaBindBufferMemory");
16667 
16668  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16669 
16670  return allocator->BindBufferMemory(allocation, buffer);
16671 }
16672 
16673 VkResult vmaBindImageMemory(
16674  VmaAllocator allocator,
16675  VmaAllocation allocation,
16676  VkImage image)
16677 {
16678  VMA_ASSERT(allocator && allocation && image);
16679 
16680  VMA_DEBUG_LOG("vmaBindImageMemory");
16681 
16682  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16683 
16684  return allocator->BindImageMemory(allocation, image);
16685 }
16686 
16687 VkResult vmaCreateBuffer(
16688  VmaAllocator allocator,
16689  const VkBufferCreateInfo* pBufferCreateInfo,
16690  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16691  VkBuffer* pBuffer,
16692  VmaAllocation* pAllocation,
16693  VmaAllocationInfo* pAllocationInfo)
16694 {
16695  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16696 
16697  if(pBufferCreateInfo->size == 0)
16698  {
16699  return VK_ERROR_VALIDATION_FAILED_EXT;
16700  }
16701 
16702  VMA_DEBUG_LOG("vmaCreateBuffer");
16703 
16704  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16705 
16706  *pBuffer = VK_NULL_HANDLE;
16707  *pAllocation = VK_NULL_HANDLE;
16708 
16709  // 1. Create VkBuffer.
16710  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16711  allocator->m_hDevice,
16712  pBufferCreateInfo,
16713  allocator->GetAllocationCallbacks(),
16714  pBuffer);
16715  if(res >= 0)
16716  {
16717  // 2. vkGetBufferMemoryRequirements.
16718  VkMemoryRequirements vkMemReq = {};
16719  bool requiresDedicatedAllocation = false;
16720  bool prefersDedicatedAllocation = false;
16721  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16722  requiresDedicatedAllocation, prefersDedicatedAllocation);
16723 
16724  // Make sure alignment requirements for specific buffer usages reported
16725  // in Physical Device Properties are included in the alignment reported by memory requirements.
16726  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16727  {
16728  VMA_ASSERT(vkMemReq.alignment %
16729  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16730  }
16731  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16732  {
16733  VMA_ASSERT(vkMemReq.alignment %
16734  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16735  }
16736  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16737  {
16738  VMA_ASSERT(vkMemReq.alignment %
16739  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16740  }
16741 
16742  // 3. Allocate memory using allocator.
16743  res = allocator->AllocateMemory(
16744  vkMemReq,
16745  requiresDedicatedAllocation,
16746  prefersDedicatedAllocation,
16747  *pBuffer, // dedicatedBuffer
16748  VK_NULL_HANDLE, // dedicatedImage
16749  *pAllocationCreateInfo,
16750  VMA_SUBALLOCATION_TYPE_BUFFER,
16751  1, // allocationCount
16752  pAllocation);
16753 
16754 #if VMA_RECORDING_ENABLED
16755  if(allocator->GetRecorder() != VMA_NULL)
16756  {
16757  allocator->GetRecorder()->RecordCreateBuffer(
16758  allocator->GetCurrentFrameIndex(),
16759  *pBufferCreateInfo,
16760  *pAllocationCreateInfo,
16761  *pAllocation);
16762  }
16763 #endif
16764 
16765  if(res >= 0)
16766  {
16767  // 4. Bind buffer with memory.
16768  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16769  {
16770  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16771  }
16772  if(res >= 0)
16773  {
16774  // All steps succeeded.
16775  #if VMA_STATS_STRING_ENABLED
16776  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16777  #endif
16778  if(pAllocationInfo != VMA_NULL)
16779  {
16780  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16781  }
16782 
16783  return VK_SUCCESS;
16784  }
16785  allocator->FreeMemory(
16786  1, // allocationCount
16787  pAllocation);
16788  *pAllocation = VK_NULL_HANDLE;
16789  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16790  *pBuffer = VK_NULL_HANDLE;
16791  return res;
16792  }
16793  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16794  *pBuffer = VK_NULL_HANDLE;
16795  return res;
16796  }
16797  return res;
16798 }
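/*
Usage sketch (illustrative only): the caller-side counterpart of the
create -> allocate -> bind sequence implemented above. The size and usage
flags are assumptions for the example.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
    // ... use buf ...
    vmaDestroyBuffer(allocator, buf, alloc);
*/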
16799 
16800 void vmaDestroyBuffer(
16801  VmaAllocator allocator,
16802  VkBuffer buffer,
16803  VmaAllocation allocation)
16804 {
16805  VMA_ASSERT(allocator);
16806 
16807  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16808  {
16809  return;
16810  }
16811 
16812  VMA_DEBUG_LOG("vmaDestroyBuffer");
16813 
16814  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16815 
16816 #if VMA_RECORDING_ENABLED
16817  if(allocator->GetRecorder() != VMA_NULL)
16818  {
16819  allocator->GetRecorder()->RecordDestroyBuffer(
16820  allocator->GetCurrentFrameIndex(),
16821  allocation);
16822  }
16823 #endif
16824 
16825  if(buffer != VK_NULL_HANDLE)
16826  {
16827  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16828  }
16829 
16830  if(allocation != VK_NULL_HANDLE)
16831  {
16832  allocator->FreeMemory(
16833  1, // allocationCount
16834  &allocation);
16835  }
16836 }
16837 
16838 VkResult vmaCreateImage(
16839  VmaAllocator allocator,
16840  const VkImageCreateInfo* pImageCreateInfo,
16841  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16842  VkImage* pImage,
16843  VmaAllocation* pAllocation,
16844  VmaAllocationInfo* pAllocationInfo)
16845 {
16846  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16847 
16848  if(pImageCreateInfo->extent.width == 0 ||
16849  pImageCreateInfo->extent.height == 0 ||
16850  pImageCreateInfo->extent.depth == 0 ||
16851  pImageCreateInfo->mipLevels == 0 ||
16852  pImageCreateInfo->arrayLayers == 0)
16853  {
16854  return VK_ERROR_VALIDATION_FAILED_EXT;
16855  }
16856 
16857  VMA_DEBUG_LOG("vmaCreateImage");
16858 
16859  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16860 
16861  *pImage = VK_NULL_HANDLE;
16862  *pAllocation = VK_NULL_HANDLE;
16863 
16864  // 1. Create VkImage.
16865  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16866  allocator->m_hDevice,
16867  pImageCreateInfo,
16868  allocator->GetAllocationCallbacks(),
16869  pImage);
16870  if(res >= 0)
16871  {
16872  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16873  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16874  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16875 
16876  // 2. Allocate memory using allocator.
16877  VkMemoryRequirements vkMemReq = {};
16878  bool requiresDedicatedAllocation = false;
16879  bool prefersDedicatedAllocation = false;
16880  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16881  requiresDedicatedAllocation, prefersDedicatedAllocation);
16882 
16883  res = allocator->AllocateMemory(
16884  vkMemReq,
16885  requiresDedicatedAllocation,
16886  prefersDedicatedAllocation,
16887  VK_NULL_HANDLE, // dedicatedBuffer
16888  *pImage, // dedicatedImage
16889  *pAllocationCreateInfo,
16890  suballocType,
16891  1, // allocationCount
16892  pAllocation);
16893 
16894 #if VMA_RECORDING_ENABLED
16895  if(allocator->GetRecorder() != VMA_NULL)
16896  {
16897  allocator->GetRecorder()->RecordCreateImage(
16898  allocator->GetCurrentFrameIndex(),
16899  *pImageCreateInfo,
16900  *pAllocationCreateInfo,
16901  *pAllocation);
16902  }
16903 #endif
16904 
16905  if(res >= 0)
16906  {
16907  // 3. Bind image with memory.
16908  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16909  {
16910  res = allocator->BindImageMemory(*pAllocation, *pImage);
16911  }
16912  if(res >= 0)
16913  {
16914  // All steps succeeded.
16915  #if VMA_STATS_STRING_ENABLED
16916  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16917  #endif
16918  if(pAllocationInfo != VMA_NULL)
16919  {
16920  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16921  }
16922 
16923  return VK_SUCCESS;
16924  }
16925  allocator->FreeMemory(
16926  1, // allocationCount
16927  pAllocation);
16928  *pAllocation = VK_NULL_HANDLE;
16929  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16930  *pImage = VK_NULL_HANDLE;
16931  return res;
16932  }
16933  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16934  *pImage = VK_NULL_HANDLE;
16935  return res;
16936  }
16937  return res;
16938 }
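/*
Usage sketch (illustrative only): note how the tiling chosen here selects the
suballocation type above (OPTIMAL vs LINEAR). All concrete values are
assumptions for the example.

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // -> VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage img = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
*/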
16939 
16940 void vmaDestroyImage(
16941  VmaAllocator allocator,
16942  VkImage image,
16943  VmaAllocation allocation)
16944 {
16945  VMA_ASSERT(allocator);
16946 
16947  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16948  {
16949  return;
16950  }
16951 
16952  VMA_DEBUG_LOG("vmaDestroyImage");
16953 
16954  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16955 
16956 #if VMA_RECORDING_ENABLED
16957  if(allocator->GetRecorder() != VMA_NULL)
16958  {
16959  allocator->GetRecorder()->RecordDestroyImage(
16960  allocator->GetCurrentFrameIndex(),
16961  allocation);
16962  }
16963 #endif
16964 
16965  if(image != VK_NULL_HANDLE)
16966  {
16967  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16968  }
16969  if(allocation != VK_NULL_HANDLE)
16970  {
16971  allocator->FreeMemory(
16972  1, // allocationCount
16973  &allocation);
16974  }
16975 }
16976 
16977 #endif // #ifdef VMA_IMPLEMENTATION
+Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1635 /*
1636 Define this macro to 0/1 to disable/enable support for recording functionality,
1637 available through VmaAllocatorCreateInfo::pRecordSettings.
1638 */
1639 #ifndef VMA_RECORDING_ENABLED
1640  #ifdef _WIN32
1641  #define VMA_RECORDING_ENABLED 1
1642  #else
1643  #define VMA_RECORDING_ENABLED 0
1644  #endif
1645 #endif
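/*
For example (an assumed build setup), to force recording off even on Windows,
define the macro before including this header:

    #define VMA_RECORDING_ENABLED 0
    #include "vk_mem_alloc.h"
*/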
1646 
1647 #ifndef NOMINMAX
1648  #define NOMINMAX // For windows.h
1649 #endif
1650 
1651 #ifndef VULKAN_H_
1652  #include <vulkan/vulkan.h>
1653 #endif
1654 
1655 #if VMA_RECORDING_ENABLED
1656  #include <windows.h>
1657 #endif
1658 
1659 #if !defined(VMA_DEDICATED_ALLOCATION)
1660  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1661  #define VMA_DEDICATED_ALLOCATION 1
1662  #else
1663  #define VMA_DEDICATED_ALLOCATION 0
1664  #endif
1665 #endif
1666 
1676 VK_DEFINE_HANDLE(VmaAllocator)
1677 
1678 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1680  VmaAllocator allocator,
1681  uint32_t memoryType,
1682  VkDeviceMemory memory,
1683  VkDeviceSize size);
1685 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1686  VmaAllocator allocator,
1687  uint32_t memoryType,
1688  VkDeviceMemory memory,
1689  VkDeviceSize size);
1690 
1704 
1734 
1737 typedef VkFlags VmaAllocatorCreateFlags;
1738 
1743 typedef struct VmaVulkanFunctions {
1744  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1745  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1746  PFN_vkAllocateMemory vkAllocateMemory;
1747  PFN_vkFreeMemory vkFreeMemory;
1748  PFN_vkMapMemory vkMapMemory;
1749  PFN_vkUnmapMemory vkUnmapMemory;
1750  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1751  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1752  PFN_vkBindBufferMemory vkBindBufferMemory;
1753  PFN_vkBindImageMemory vkBindImageMemory;
1754  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1755  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1756  PFN_vkCreateBuffer vkCreateBuffer;
1757  PFN_vkDestroyBuffer vkDestroyBuffer;
1758  PFN_vkCreateImage vkCreateImage;
1759  PFN_vkDestroyImage vkDestroyImage;
1760  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1761 #if VMA_DEDICATED_ALLOCATION
1762  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1763  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1764 #endif
1765 } VmaVulkanFunctions;
1766 
1768 typedef enum VmaRecordFlagBits {
1775 
1778 typedef VkFlags VmaRecordFlags;
1779 
1781 typedef struct VmaRecordSettings
1782 {
1792  const char* pFilePath;
1793 } VmaRecordSettings;
1794 
1796 typedef struct VmaAllocatorCreateInfo
1797 {
1801 
1802  VkPhysicalDevice physicalDevice;
1804 
1805  VkDevice device;
1807 
1810 
1811  const VkAllocationCallbacks* pAllocationCallbacks;
1813 
1853  const VkDeviceSize* pHeapSizeLimit;
1874 
1876 VkResult vmaCreateAllocator(
1877  const VmaAllocatorCreateInfo* pCreateInfo,
1878  VmaAllocator* pAllocator);
1879 
1881 void vmaDestroyAllocator(
1882  VmaAllocator allocator);
1883 
1888 void vmaGetPhysicalDeviceProperties(
1889  VmaAllocator allocator,
1890  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1891 
1896 void vmaGetMemoryProperties(
1897  VmaAllocator allocator,
1898  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1899 
1906 void vmaGetMemoryTypeProperties(
1907  VmaAllocator allocator,
1908  uint32_t memoryTypeIndex,
1909  VkMemoryPropertyFlags* pFlags);
1910 
1919 void vmaSetCurrentFrameIndex(
1920  VmaAllocator allocator,
1921  uint32_t frameIndex);
1922 
1925 typedef struct VmaStatInfo
1926 {
1928  uint32_t blockCount;
1934  VkDeviceSize usedBytes;
1936  VkDeviceSize unusedBytes;
1939 } VmaStatInfo;
1940 
1942 typedef struct VmaStats
1943 {
1944  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1945  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1947 } VmaStats;
1948 
1950 void vmaCalculateStats(
1951  VmaAllocator allocator,
1952  VmaStats* pStats);
1953 
1954 #ifndef VMA_STATS_STRING_ENABLED
1955 #define VMA_STATS_STRING_ENABLED 1
1956 #endif
1957 
1958 #if VMA_STATS_STRING_ENABLED
1959 
1961 
1963 void vmaBuildStatsString(
1964  VmaAllocator allocator,
1965  char** ppStatsString,
1966  VkBool32 detailedMap);
1967 
1968 void vmaFreeStatsString(
1969  VmaAllocator allocator,
1970  char* pStatsString);
1971 
1972 #endif // #if VMA_STATS_STRING_ENABLED
1973 
1982 VK_DEFINE_HANDLE(VmaPool)
1983 
1984 typedef enum VmaMemoryUsage
1985 {
2034 } VmaMemoryUsage;
2035 
2045 
2106 
2122 
2132 
2139 
2143 
2144 typedef struct VmaAllocationCreateInfo
2145 {
2158  VkMemoryPropertyFlags requiredFlags;
2163  VkMemoryPropertyFlags preferredFlags;
2171  uint32_t memoryTypeBits;
2184  void* pUserData;
2185 } VmaAllocationCreateInfo;
2186 
2203 VkResult vmaFindMemoryTypeIndex(
2204  VmaAllocator allocator,
2205  uint32_t memoryTypeBits,
2206  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2207  uint32_t* pMemoryTypeIndex);
2208 
2221 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2222  VmaAllocator allocator,
2223  const VkBufferCreateInfo* pBufferCreateInfo,
2224  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2225  uint32_t* pMemoryTypeIndex);
2226 
2239 VkResult vmaFindMemoryTypeIndexForImageInfo(
2240  VmaAllocator allocator,
2241  const VkImageCreateInfo* pImageCreateInfo,
2242  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2243  uint32_t* pMemoryTypeIndex);
2244 
2265 
2282 
2293 
2299 
2302 typedef VkFlags VmaPoolCreateFlags;
2303 
2306 typedef struct VmaPoolCreateInfo {
2321  VkDeviceSize blockSize;
2349 } VmaPoolCreateInfo;
2350 
2353 typedef struct VmaPoolStats {
2356  VkDeviceSize size;
2359  VkDeviceSize unusedSize;
2372  VkDeviceSize unusedRangeSizeMax;
2375  size_t blockCount;
2376 } VmaPoolStats;
2377 
2384 VkResult vmaCreatePool(
2385  VmaAllocator allocator,
2386  const VmaPoolCreateInfo* pCreateInfo,
2387  VmaPool* pPool);
2388 
2391 void vmaDestroyPool(
2392  VmaAllocator allocator,
2393  VmaPool pool);
2394 
2401 void vmaGetPoolStats(
2402  VmaAllocator allocator,
2403  VmaPool pool,
2404  VmaPoolStats* pPoolStats);
2405 
2412 void vmaMakePoolAllocationsLost(
2413  VmaAllocator allocator,
2414  VmaPool pool,
2415  size_t* pLostAllocationCount);
2416 
2431 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2432 
2457 VK_DEFINE_HANDLE(VmaAllocation)
2458 
2459 
2461 typedef struct VmaAllocationInfo {
2466  uint32_t memoryType;
2475  VkDeviceMemory deviceMemory;
2480  VkDeviceSize offset;
2485  VkDeviceSize size;
2499  void* pUserData;
2500 } VmaAllocationInfo;
2501 
2512 VkResult vmaAllocateMemory(
2513  VmaAllocator allocator,
2514  const VkMemoryRequirements* pVkMemoryRequirements,
2515  const VmaAllocationCreateInfo* pCreateInfo,
2516  VmaAllocation* pAllocation,
2517  VmaAllocationInfo* pAllocationInfo);
2518 
2538 VkResult vmaAllocateMemoryPages(
2539  VmaAllocator allocator,
2540  const VkMemoryRequirements* pVkMemoryRequirements,
2541  const VmaAllocationCreateInfo* pCreateInfo,
2542  size_t allocationCount,
2543  VmaAllocation* pAllocations,
2544  VmaAllocationInfo* pAllocationInfo);
2545 
2552 VkResult vmaAllocateMemoryForBuffer(
2553  VmaAllocator allocator,
2554  VkBuffer buffer,
2555  const VmaAllocationCreateInfo* pCreateInfo,
2556  VmaAllocation* pAllocation,
2557  VmaAllocationInfo* pAllocationInfo);
2558 
2560 VkResult vmaAllocateMemoryForImage(
2561  VmaAllocator allocator,
2562  VkImage image,
2563  const VmaAllocationCreateInfo* pCreateInfo,
2564  VmaAllocation* pAllocation,
2565  VmaAllocationInfo* pAllocationInfo);
2566 
2571 void vmaFreeMemory(
2572  VmaAllocator allocator,
2573  VmaAllocation allocation);
2574 
2585 void vmaFreeMemoryPages(
2586  VmaAllocator allocator,
2587  size_t allocationCount,
2588  VmaAllocation* pAllocations);
2589 
2610 VkResult vmaResizeAllocation(
2611  VmaAllocator allocator,
2612  VmaAllocation allocation,
2613  VkDeviceSize newSize);
2614 
2631 void vmaGetAllocationInfo(
2632  VmaAllocator allocator,
2633  VmaAllocation allocation,
2634  VmaAllocationInfo* pAllocationInfo);
2635 
2650 VkBool32 vmaTouchAllocation(
2651  VmaAllocator allocator,
2652  VmaAllocation allocation);
2653 
2667 void vmaSetAllocationUserData(
2668  VmaAllocator allocator,
2669  VmaAllocation allocation,
2670  void* pUserData);
2671 
2682 void vmaCreateLostAllocation(
2683  VmaAllocator allocator,
2684  VmaAllocation* pAllocation);
2685 
2720 VkResult vmaMapMemory(
2721  VmaAllocator allocator,
2722  VmaAllocation allocation,
2723  void** ppData);
2724 
2729 void vmaUnmapMemory(
2730  VmaAllocator allocator,
2731  VmaAllocation allocation);
2732 
2749 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2750 
2767 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2768 
2785 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2786 
2793 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2794 
2795 typedef enum VmaDefragmentationFlagBits {
2797  VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2798 } VmaDefragmentationFlagBits;
2799 typedef VkFlags VmaDefragmentationFlags;
2800 
2805 typedef struct VmaDefragmentationInfo2 {
2829  uint32_t poolCount;
2850  VkDeviceSize maxCpuBytesToMove;
2860  VkDeviceSize maxGpuBytesToMove;
2874  VkCommandBuffer commandBuffer;
2875 } VmaDefragmentationInfo2;
2876 
2881 typedef struct VmaDefragmentationInfo {
2886  VkDeviceSize maxBytesToMove;
2891  uint32_t maxAllocationsToMove;
2892 } VmaDefragmentationInfo;
2893 
2895 typedef struct VmaDefragmentationStats {
2897  VkDeviceSize bytesMoved;
2899  VkDeviceSize bytesFreed;
2901  uint32_t allocationsMoved;
2903  uint32_t deviceMemoryBlocksFreed;
2904 } VmaDefragmentationStats;
2905 
2935 VkResult vmaDefragmentationBegin(
2936  VmaAllocator allocator,
2937  const VmaDefragmentationInfo2* pInfo,
2938  VmaDefragmentationStats* pStats,
2939  VmaDefragmentationContext *pContext);
2940 
2946 VkResult vmaDefragmentationEnd(
2947  VmaAllocator allocator,
2948  VmaDefragmentationContext context);
2949 
2990 VkResult vmaDefragment(
2991  VmaAllocator allocator,
2992  VmaAllocation* pAllocations,
2993  size_t allocationCount,
2994  VkBool32* pAllocationsChanged,
2995  const VmaDefragmentationInfo *pDefragmentationInfo,
2996  VmaDefragmentationStats* pDefragmentationStats);
2997 
3010 VkResult vmaBindBufferMemory(
3011  VmaAllocator allocator,
3012  VmaAllocation allocation,
3013  VkBuffer buffer);
3014 
3027 VkResult vmaBindImageMemory(
3028  VmaAllocator allocator,
3029  VmaAllocation allocation,
3030  VkImage image);
3031 
3058 VkResult vmaCreateBuffer(
3059  VmaAllocator allocator,
3060  const VkBufferCreateInfo* pBufferCreateInfo,
3061  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3062  VkBuffer* pBuffer,
3063  VmaAllocation* pAllocation,
3064  VmaAllocationInfo* pAllocationInfo);
3065 
3077 void vmaDestroyBuffer(
3078  VmaAllocator allocator,
3079  VkBuffer buffer,
3080  VmaAllocation allocation);
3081 
3083 VkResult vmaCreateImage(
3084  VmaAllocator allocator,
3085  const VkImageCreateInfo* pImageCreateInfo,
3086  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3087  VkImage* pImage,
3088  VmaAllocation* pAllocation,
3089  VmaAllocationInfo* pAllocationInfo);
3090 
3102 void vmaDestroyImage(
3103  VmaAllocator allocator,
3104  VkImage image,
3105  VmaAllocation allocation);
3106 
3107 #ifdef __cplusplus
3108 }
3109 #endif
3110 
3111 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3112 
3113 // For Visual Studio IntelliSense.
3114 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3115 #define VMA_IMPLEMENTATION
3116 #endif
3117 
3118 #ifdef VMA_IMPLEMENTATION
3119 #undef VMA_IMPLEMENTATION
3120 
3121 #include <cstdint>
3122 #include <cstdlib>
3123 #include <cstring>
3124 
3125 /*******************************************************************************
3126 CONFIGURATION SECTION
3127 
3128 Define some of these macros before each #include of this header, or change them
3129 here if you need behavior other than the default, depending on your environment.
3130 */
3131 
3132 /*
3133 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3134 internally, like:
3135 
3136  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3137 
3138 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3139 VmaAllocatorCreateInfo::pVulkanFunctions.
3140 */
3141 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3142 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3143 #endif
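/*
Sketch (illustrative only) of the dynamic alternative: with
VMA_STATIC_VULKAN_FUNCTIONS defined to 0, fill VmaVulkanFunctions yourself and
pass it at allocator creation. Where the pointers come from (e.g. a loader such
as volk) is an assumption here; the myLoaded* names are hypothetical.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = myLoadedGetPhysicalDeviceProperties; // hypothetical loaded pointer
    vulkanFunctions.vkAllocateMemory = myLoadedAllocateMemory; // hypothetical loaded pointer
    // ... fill every remaining member of VmaVulkanFunctions the same way ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
    // vmaCreateAllocator(&allocatorInfo, &allocator);
*/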
3144 
3145 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3146 //#define VMA_USE_STL_CONTAINERS 1
3147 
3148 /* Set this macro to 1 to make the library include and use STL containers:
3149 std::pair, std::vector, std::list, std::unordered_map.
3150 
3151 Set it to 0 or leave it undefined to make the library use its own implementation of
3152 the containers.
3153 */
3154 #if VMA_USE_STL_CONTAINERS
3155  #define VMA_USE_STL_VECTOR 1
3156  #define VMA_USE_STL_UNORDERED_MAP 1
3157  #define VMA_USE_STL_LIST 1
3158 #endif
3159 
3160 #ifndef VMA_USE_STL_SHARED_MUTEX
3161  // Compiler conforms to C++17.
3162  #if __cplusplus >= 201703L
3163  #define VMA_USE_STL_SHARED_MUTEX 1
3164  // Visual Studio defines __cplusplus properly only when passed the additional parameter: /Zc:__cplusplus
3165  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
3166  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3167  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3168  #define VMA_USE_STL_SHARED_MUTEX 1
3169  #else
3170  #define VMA_USE_STL_SHARED_MUTEX 0
3171  #endif
3172 #endif
3173 
3174 /*
3175 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3176 Library has its own container implementation.
3177 */
3178 #if VMA_USE_STL_VECTOR
3179  #include <vector>
3180 #endif
3181 
3182 #if VMA_USE_STL_UNORDERED_MAP
3183  #include <unordered_map>
3184 #endif
3185 
3186 #if VMA_USE_STL_LIST
3187  #include <list>
3188 #endif
3189 
3190 /*
3191 The following headers are used in this CONFIGURATION section only, so feel free to
3192 remove them if not needed.
3193 */
3194 #include <cassert> // for assert
3195 #include <algorithm> // for min, max
3196 #include <mutex>
3197 
3198 #ifndef VMA_NULL
3199  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3200  #define VMA_NULL nullptr
3201 #endif
3202 
3203 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3204 #include <cstdlib>
3205 void *aligned_alloc(size_t alignment, size_t size)
3206 {
3207  // alignment must be >= sizeof(void*)
3208  if(alignment < sizeof(void*))
3209  {
3210  alignment = sizeof(void*);
3211  }
3212 
3213  return memalign(alignment, size);
3214 }
3215 #elif defined(__APPLE__) || defined(__ANDROID__)
3216 #include <cstdlib>
3217 void *aligned_alloc(size_t alignment, size_t size)
3218 {
3219  // alignment must be >= sizeof(void*)
3220  if(alignment < sizeof(void*))
3221  {
3222  alignment = sizeof(void*);
3223  }
3224 
3225  void *pointer;
3226  if(posix_memalign(&pointer, alignment, size) == 0)
3227  return pointer;
3228  return VMA_NULL;
3229 }
3230 #endif
3231 
3232 // If your compiler is not compatible with C++11 and the definition of the
3233 // aligned_alloc() function is missing, uncommenting the following line may help:
3234 
3235 //#include <malloc.h>
3236 
3237 // Normal assert to check for programmer's errors, especially in Debug configuration.
3238 #ifndef VMA_ASSERT
3239  #ifdef _DEBUG
3240  #define VMA_ASSERT(expr) assert(expr)
3241  #else
3242  #define VMA_ASSERT(expr)
3243  #endif
3244 #endif
3245 
3246 // Assert that will be called very often, like inside data structures e.g. operator[].
3247 // Making it non-empty can make the program slow.
3248 #ifndef VMA_HEAVY_ASSERT
3249  #ifdef _DEBUG
3250  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3251  #else
3252  #define VMA_HEAVY_ASSERT(expr)
3253  #endif
3254 #endif
3255 
3256 #ifndef VMA_ALIGN_OF
3257  #define VMA_ALIGN_OF(type) (__alignof(type))
3258 #endif
3259 
3260 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3261  #if defined(_WIN32)
3262  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3263  #else
3264  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3265  #endif
3266 #endif
3267 
3268 #ifndef VMA_SYSTEM_FREE
3269  #if defined(_WIN32)
3270  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3271  #else
3272  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3273  #endif
3274 #endif
3275 
3276 #ifndef VMA_MIN
3277  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3278 #endif
3279 
3280 #ifndef VMA_MAX
3281  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3282 #endif
3283 
3284 #ifndef VMA_SWAP
3285  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3286 #endif
3287 
3288 #ifndef VMA_SORT
3289  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3290 #endif
3291 
3292 #ifndef VMA_DEBUG_LOG
3293  #define VMA_DEBUG_LOG(format, ...)
3294  /*
3295  #define VMA_DEBUG_LOG(format, ...) do { \
3296  printf(format, __VA_ARGS__); \
3297  printf("\n"); \
3298  } while(false)
3299  */
3300 #endif
3301 
3302 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3303 #if VMA_STATS_STRING_ENABLED
3304  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3305  {
3306  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3307  }
3308  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3309  {
3310  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3311  }
3312  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3313  {
3314  snprintf(outStr, strLen, "%p", ptr);
3315  }
3316 #endif
3317 
3318 #ifndef VMA_MUTEX
3319  class VmaMutex
3320  {
3321  public:
3322  void Lock() { m_Mutex.lock(); }
3323  void Unlock() { m_Mutex.unlock(); }
3324  private:
3325  std::mutex m_Mutex;
3326  };
3327  #define VMA_MUTEX VmaMutex
3328 #endif
3329 
3330 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3331 #ifndef VMA_RW_MUTEX
3332  #if VMA_USE_STL_SHARED_MUTEX
3333  // Use std::shared_mutex from C++17.
3334  #include <shared_mutex>
3335  class VmaRWMutex
3336  {
3337  public:
3338  void LockRead() { m_Mutex.lock_shared(); }
3339  void UnlockRead() { m_Mutex.unlock_shared(); }
3340  void LockWrite() { m_Mutex.lock(); }
3341  void UnlockWrite() { m_Mutex.unlock(); }
3342  private:
3343  std::shared_mutex m_Mutex;
3344  };
3345  #define VMA_RW_MUTEX VmaRWMutex
3346  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3347  // Use SRWLOCK from WinAPI.
3348  // Minimum supported client = Windows Vista, server = Windows Server 2008.
3349  class VmaRWMutex
3350  {
3351  public:
3352  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3353  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3354  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3355  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3356  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3357  private:
3358  SRWLOCK m_Lock;
3359  };
3360  #define VMA_RW_MUTEX VmaRWMutex
3361  #else
3362  // Less efficient fallback: Use normal mutex.
3363  class VmaRWMutex
3364  {
3365  public:
3366  void LockRead() { m_Mutex.Lock(); }
3367  void UnlockRead() { m_Mutex.Unlock(); }
3368  void LockWrite() { m_Mutex.Lock(); }
3369  void UnlockWrite() { m_Mutex.Unlock(); }
3370  private:
3371  VMA_MUTEX m_Mutex;
3372  };
3373  #define VMA_RW_MUTEX VmaRWMutex
3374  #endif // #if VMA_USE_STL_SHARED_MUTEX
3375 #endif // #ifndef VMA_RW_MUTEX
3376 
3377 /*
3378 If providing your own implementation, you need to implement a subset of std::atomic:
3379 
3380 - Constructor(uint32_t desired)
3381 - uint32_t load() const
3382 - void store(uint32_t desired)
3383 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3384 */
3385 #ifndef VMA_ATOMIC_UINT32
3386  #include <atomic>
3387  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3388 #endif
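/*
Minimal sketch (illustrative only) of a conforming replacement. This one merely
delegates to std::atomic, which demonstrates the required interface; a real
override would supply a platform-specific primitive instead.

    class MyAtomicUint32
    {
    public:
        MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return m_Value.load(); }
        void store(uint32_t desired) { m_Value.store(desired); }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
            { return m_Value.compare_exchange_weak(expected, desired); }
    private:
        std::atomic<uint32_t> m_Value;
    };
    #define VMA_ATOMIC_UINT32 MyAtomicUint32
*/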
3389 
3390 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3391 
3395  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3396 #endif
3397 
3398 #ifndef VMA_DEBUG_ALIGNMENT
3399 
3403  #define VMA_DEBUG_ALIGNMENT (1)
3404 #endif
3405 
3406 #ifndef VMA_DEBUG_MARGIN
3407 
3411  #define VMA_DEBUG_MARGIN (0)
3412 #endif
3413 
3414 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3415 
3419  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3420 #endif
3421 
3422 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3423 
3428  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3429 #endif
3430 
3431 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3432 
3436  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3437 #endif
3438 
3439 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3440 
3444  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3445 #endif
3446 
3447 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3448  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3450 #endif
3451 
3452 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3453  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3455 #endif
3456 
3457 #ifndef VMA_CLASS_NO_COPY
3458  #define VMA_CLASS_NO_COPY(className) \
3459  private: \
3460  className(const className&) = delete; \
3461  className& operator=(const className&) = delete;
3462 #endif
3463 
3464 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3465 
3466 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3467 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3468 
3469 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3470 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3471 
3472 /*******************************************************************************
3473 END OF CONFIGURATION
3474 */
3475 
3476 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3477 
3478 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3479  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3480 
3481 // Returns number of bits set to 1 in (v).
3482 static inline uint32_t VmaCountBitsSet(uint32_t v)
3483 {
3484  uint32_t c = v - ((v >> 1) & 0x55555555);
3485  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3486  c = ((c >> 4) + c) & 0x0F0F0F0F;
3487  c = ((c >> 8) + c) & 0x00FF00FF;
3488  c = ((c >> 16) + c) & 0x0000FFFF;
3489  return c;
3490 }
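// For example: VmaCountBitsSet(0xB) = 3, because 0xB is binary 1011.
// This is the classic branchless SWAR population-count technique.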
3491 
3492 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3493 // Use types like uint32_t, uint64_t as T.
3494 template <typename T>
3495 static inline T VmaAlignUp(T val, T align)
3496 {
3497  return (val + align - 1) / align * align;
3498 }
3499 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3500 // Use types like uint32_t, uint64_t as T.
3501 template <typename T>
3502 static inline T VmaAlignDown(T val, T align)
3503 {
3504  return val / align * align;
3505 }
3506 
3507 // Division with mathematical rounding to nearest number.
3508 template <typename T>
3509 static inline T VmaRoundDiv(T x, T y)
3510 {
3511  return (x + (y / (T)2)) / y;
3512 }
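// For example: VmaRoundDiv(10, 4) = 3, because (10 + 2) / 4 = 3, whereas plain
// integer division 10 / 4 would give 2. Exact halves round up: VmaRoundDiv(5, 2) = 3.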
3513 
3514 /*
3515 Returns true if given number is a power of two.
3516 T must be an unsigned integer type, or a signed integer type used only with nonnegative values.
3517 For 0 returns true.
3518 */
3519 template <typename T>
3520 inline bool VmaIsPow2(T x)
3521 {
3522  return (x & (x-1)) == 0;
3523 }
3524 
3525 // Returns smallest power of 2 greater or equal to v.
3526 static inline uint32_t VmaNextPow2(uint32_t v)
3527 {
3528  v--;
3529  v |= v >> 1;
3530  v |= v >> 2;
3531  v |= v >> 4;
3532  v |= v >> 8;
3533  v |= v >> 16;
3534  v++;
3535  return v;
3536 }
3537 static inline uint64_t VmaNextPow2(uint64_t v)
3538 {
3539  v--;
3540  v |= v >> 1;
3541  v |= v >> 2;
3542  v |= v >> 4;
3543  v |= v >> 8;
3544  v |= v >> 16;
3545  v |= v >> 32;
3546  v++;
3547  return v;
3548 }
3549 
3550 // Returns largest power of 2 less or equal to v.
3551 static inline uint32_t VmaPrevPow2(uint32_t v)
3552 {
3553  v |= v >> 1;
3554  v |= v >> 2;
3555  v |= v >> 4;
3556  v |= v >> 8;
3557  v |= v >> 16;
3558  v = v ^ (v >> 1);
3559  return v;
3560 }
3561 static inline uint64_t VmaPrevPow2(uint64_t v)
3562 {
3563  v |= v >> 1;
3564  v |= v >> 2;
3565  v |= v >> 4;
3566  v |= v >> 8;
3567  v |= v >> 16;
3568  v |= v >> 32;
3569  v = v ^ (v >> 1);
3570  return v;
3571 }
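// For example: VmaNextPow2(17u) = 32, VmaNextPow2(16u) = 16,
// VmaPrevPow2(17u) = 16, VmaPrevPow2(16u) = 16.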
3572 
3573 static inline bool VmaStrIsEmpty(const char* pStr)
3574 {
3575  return pStr == VMA_NULL || *pStr == '\0';
3576 }
3577 
3578 #if VMA_STATS_STRING_ENABLED
3579 
3580 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3581 {
3582  switch(algorithm)
3583  {
3584  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3585  return "Linear";
3586  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3587  return "Buddy";
3588  case 0:
3589  return "Default";
3590  default:
3591  VMA_ASSERT(0);
3592  return "";
3593  }
3594 }
3595 
3596 #endif // #if VMA_STATS_STRING_ENABLED
3597 
3598 #ifndef VMA_SORT
3599 
3600 template<typename Iterator, typename Compare>
3601 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3602 {
3603  Iterator centerValue = end; --centerValue;
3604  Iterator insertIndex = beg;
3605  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3606  {
3607  if(cmp(*memTypeIndex, *centerValue))
3608  {
3609  if(insertIndex != memTypeIndex)
3610  {
3611  VMA_SWAP(*memTypeIndex, *insertIndex);
3612  }
3613  ++insertIndex;
3614  }
3615  }
3616  if(insertIndex != centerValue)
3617  {
3618  VMA_SWAP(*insertIndex, *centerValue);
3619  }
3620  return insertIndex;
3621 }
3622 
3623 template<typename Iterator, typename Compare>
3624 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3625 {
3626  if(beg < end)
3627  {
3628  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3629  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3630  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3631  }
3632 }
3633 
3634 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3635 
3636 #endif // #ifndef VMA_SORT
3637 
3638 /*
3639 Returns true if two memory blocks occupy overlapping pages.
3640 ResourceA must be at a lower memory offset than ResourceB.
3641 
3642 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3643 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3644 */
3645 static inline bool VmaBlocksOnSamePage(
3646  VkDeviceSize resourceAOffset,
3647  VkDeviceSize resourceASize,
3648  VkDeviceSize resourceBOffset,
3649  VkDeviceSize pageSize)
3650 {
3651  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3652  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3653  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3654  VkDeviceSize resourceBStart = resourceBOffset;
3655  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3656  return resourceAEndPage == resourceBStartPage;
3657 }
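// Worked example (illustrative): with pageSize = 1024, a resource at offset 0
// with size 512 ends on page 0, and a resource starting at offset 600 starts on
// page 0 as well, so the function returns true. If the second resource instead
// started at offset 1024, it would start on page 1 and the result would be false.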
3658 
3659 enum VmaSuballocationType
3660 {
3661  VMA_SUBALLOCATION_TYPE_FREE = 0,
3662  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3663  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3664  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3665  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3666  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3667  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3668 };
3669 
3670 /*
3671 Returns true if given suballocation types could conflict and must respect
3672 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3673 or linear image and the other one is an optimal image. If a type is unknown, behaves
3674 conservatively.
3675 */
3676 static inline bool VmaIsBufferImageGranularityConflict(
3677  VmaSuballocationType suballocType1,
3678  VmaSuballocationType suballocType2)
3679 {
3680  if(suballocType1 > suballocType2)
3681  {
3682  VMA_SWAP(suballocType1, suballocType2);
3683  }
3684 
3685  switch(suballocType1)
3686  {
3687  case VMA_SUBALLOCATION_TYPE_FREE:
3688  return false;
3689  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3690  return true;
3691  case VMA_SUBALLOCATION_TYPE_BUFFER:
3692  return
3693  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3694  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3695  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3696  return
3697  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3698  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3699  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3700  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3701  return
3702  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3703  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3704  return false;
3705  default:
3706  VMA_ASSERT(0);
3707  return true;
3708  }
3709 }
3710 
3711 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3712 {
3713  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3714  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3715  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3716  {
3717  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3718  }
3719 }
3720 
3721 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3722 {
3723  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3724  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3725  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3726  {
3727  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3728  {
3729  return false;
3730  }
3731  }
3732  return true;
3733 }
3734 
3735 /*
3736 Fills structure with parameters of an example buffer to be used for transfers
3737 during GPU memory defragmentation.
3738 */
3739 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
3740 {
3741  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
3742  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
3743  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
3744  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
3745 }
3746 
3747 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3748 struct VmaMutexLock
3749 {
3750  VMA_CLASS_NO_COPY(VmaMutexLock)
3751 public:
3752  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
3753  m_pMutex(useMutex ? &mutex : VMA_NULL)
3754  { if(m_pMutex) { m_pMutex->Lock(); } }
3755  ~VmaMutexLock()
3756  { if(m_pMutex) { m_pMutex->Unlock(); } }
3757 private:
3758  VMA_MUTEX* m_pMutex;
3759 };
3760 
3761 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3762 struct VmaMutexLockRead
3763 {
3764  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3765 public:
3766  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3767  m_pMutex(useMutex ? &mutex : VMA_NULL)
3768  { if(m_pMutex) { m_pMutex->LockRead(); } }
3769  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3770 private:
3771  VMA_RW_MUTEX* m_pMutex;
3772 };
3773 
3774 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3775 struct VmaMutexLockWrite
3776 {
3777  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3778 public:
3779  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3780  m_pMutex(useMutex ? &mutex : VMA_NULL)
3781  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3782  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3783 private:
3784  VMA_RW_MUTEX* m_pMutex;
3785 };
3786 
3787 #if VMA_DEBUG_GLOBAL_MUTEX
3788  static VMA_MUTEX gDebugGlobalMutex;
3789  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3790 #else
3791  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3792 #endif
3793 
3794 // Minimum size of a free suballocation to register it in the free suballocation collection.
3795 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3796 
3797 /*
3798 Performs binary search and returns an iterator to the first element that is
3799 greater than or equal to (key), according to comparison (cmp).
3800 
3801 Cmp should return true if the first argument is less than the second argument.
3802 
3803 The returned iterator points to the found element if it is present in the
3804 collection, or to the place where a new element with value (key) should be inserted.
3805 */
3806 template <typename CmpLess, typename IterT, typename KeyT>
3807 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3808 {
3809  size_t down = 0, up = (end - beg);
3810  while(down < up)
3811  {
3812  const size_t mid = (down + up) / 2;
3813  if(cmp(*(beg+mid), key))
3814  {
3815  down = mid + 1;
3816  }
3817  else
3818  {
3819  up = mid;
3820  }
3821  }
3822  return beg + down;
3823 }
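/*
Usage sketch (illustrative only): behaves like std::lower_bound over a sorted
plain array.

    const uint32_t sortedArr[] = { 1, 3, 3, 7 };
    struct CmpLess { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
    const uint32_t* it = VmaBinaryFindFirstNotLess(sortedArr, sortedArr + 4, 3u, CmpLess());
    // it now points to the first 3 (index 1).
*/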
3824 
3825 /*
3826 Returns true if all pointers in the array are non-null and unique.
3827 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3828 T must be pointer type, e.g. VmaAllocation, VmaPool.
3829 */
3830 template<typename T>
3831 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3832 {
3833  for(uint32_t i = 0; i < count; ++i)
3834  {
3835  const T iPtr = arr[i];
3836  if(iPtr == VMA_NULL)
3837  {
3838  return false;
3839  }
3840  for(uint32_t j = i + 1; j < count; ++j)
3841  {
3842  if(iPtr == arr[j])
3843  {
3844  return false;
3845  }
3846  }
3847  }
3848  return true;
3849 }
3850 
3852 // Memory allocation
3853 
3854 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3855 {
3856  if((pAllocationCallbacks != VMA_NULL) &&
3857  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3858  {
3859  return (*pAllocationCallbacks->pfnAllocation)(
3860  pAllocationCallbacks->pUserData,
3861  size,
3862  alignment,
3863  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3864  }
3865  else
3866  {
3867  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3868  }
3869 }
3870 
3871 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3872 {
3873  if((pAllocationCallbacks != VMA_NULL) &&
3874  (pAllocationCallbacks->pfnFree != VMA_NULL))
3875  {
3876  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3877  }
3878  else
3879  {
3880  VMA_SYSTEM_FREE(ptr);
3881  }
3882 }
3883 
3884 template<typename T>
3885 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3886 {
3887  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3888 }
3889 
3890 template<typename T>
3891 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3892 {
3893  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3894 }
3895 
3896 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3897 
3898 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3899 
3900 template<typename T>
3901 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3902 {
3903  ptr->~T();
3904  VmaFree(pAllocationCallbacks, ptr);
3905 }
3906 
3907 template<typename T>
3908 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3909 {
3910  if(ptr != VMA_NULL)
3911  {
3912  for(size_t i = count; i--; )
3913  {
3914  ptr[i].~T();
3915  }
3916  VmaFree(pAllocationCallbacks, ptr);
3917  }
3918 }
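/*
Usage sketch (illustrative only): vma_new pairs placement-new over VmaMalloc,
and vma_delete pairs an explicit destructor call with VmaFree, so the two must
always be matched. Foo is a hypothetical type.

    struct Foo { int x; };
    Foo* foo = vma_new(pAllocationCallbacks, Foo);
    vma_delete(pAllocationCallbacks, foo);
*/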
3919 
3920 // STL-compatible allocator.
3921 template<typename T>
3922 class VmaStlAllocator
3923 {
3924 public:
3925  const VkAllocationCallbacks* const m_pCallbacks;
3926  typedef T value_type;
3927 
3928  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3929  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3930 
3931  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3932  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3933 
3934  template<typename U>
3935  bool operator==(const VmaStlAllocator<U>& rhs) const
3936  {
3937  return m_pCallbacks == rhs.m_pCallbacks;
3938  }
3939  template<typename U>
3940  bool operator!=(const VmaStlAllocator<U>& rhs) const
3941  {
3942  return m_pCallbacks != rhs.m_pCallbacks;
3943  }
3944 
3945  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3946 };
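/*
Usage sketch (illustrative only): plugging the allocator into the containers in
this file (assuming VMA_USE_STL_VECTOR is not enabled, so VmaVector below is the
custom implementation). pAllocationCallbacks may be null, in which case
allocation falls back to VMA_SYSTEM_ALIGNED_MALLOC / VMA_SYSTEM_FREE.

    VmaStlAllocator<int> intAlloc(pAllocationCallbacks);
    VmaVector< int, VmaStlAllocator<int> > v(intAlloc);
    v.push_back(42);
*/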
3947 
3948 #if VMA_USE_STL_VECTOR
3949 
3950 #define VmaVector std::vector
3951 
3952 template<typename T, typename allocatorT>
3953 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3954 {
3955  vec.insert(vec.begin() + index, item);
3956 }
3957 
3958 template<typename T, typename allocatorT>
3959 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3960 {
3961  vec.erase(vec.begin() + index);
3962 }
3963 
3964 #else // #if VMA_USE_STL_VECTOR
3965 
3966 /* Class with an interface compatible with a subset of std::vector.
3967 T must be POD because constructors and destructors are not called; memcpy is
3968 used to move these objects. */
3969 template<typename T, typename AllocatorT>
3970 class VmaVector
3971 {
3972 public:
3973  typedef T value_type;
3974 
3975  VmaVector(const AllocatorT& allocator) :
3976  m_Allocator(allocator),
3977  m_pArray(VMA_NULL),
3978  m_Count(0),
3979  m_Capacity(0)
3980  {
3981  }
3982 
3983  VmaVector(size_t count, const AllocatorT& allocator) :
3984  m_Allocator(allocator),
3985  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3986  m_Count(count),
3987  m_Capacity(count)
3988  {
3989  }
3990 
3991  VmaVector(const VmaVector<T, AllocatorT>& src) :
3992  m_Allocator(src.m_Allocator),
3993  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3994  m_Count(src.m_Count),
3995  m_Capacity(src.m_Count)
3996  {
3997  if(m_Count != 0)
3998  {
3999  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4000  }
4001  }
4002 
4003  ~VmaVector()
4004  {
4005  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4006  }
4007 
4008  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4009  {
4010  if(&rhs != this)
4011  {
4012  resize(rhs.m_Count);
4013  if(m_Count != 0)
4014  {
4015  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4016  }
4017  }
4018  return *this;
4019  }
4020 
4021  bool empty() const { return m_Count == 0; }
4022  size_t size() const { return m_Count; }
4023  T* data() { return m_pArray; }
4024  const T* data() const { return m_pArray; }
4025 
4026  T& operator[](size_t index)
4027  {
4028  VMA_HEAVY_ASSERT(index < m_Count);
4029  return m_pArray[index];
4030  }
4031  const T& operator[](size_t index) const
4032  {
4033  VMA_HEAVY_ASSERT(index < m_Count);
4034  return m_pArray[index];
4035  }
4036 
4037  T& front()
4038  {
4039  VMA_HEAVY_ASSERT(m_Count > 0);
4040  return m_pArray[0];
4041  }
4042  const T& front() const
4043  {
4044  VMA_HEAVY_ASSERT(m_Count > 0);
4045  return m_pArray[0];
4046  }
4047  T& back()
4048  {
4049  VMA_HEAVY_ASSERT(m_Count > 0);
4050  return m_pArray[m_Count - 1];
4051  }
4052  const T& back() const
4053  {
4054  VMA_HEAVY_ASSERT(m_Count > 0);
4055  return m_pArray[m_Count - 1];
4056  }
4057 
4058  void reserve(size_t newCapacity, bool freeMemory = false)
4059  {
4060  newCapacity = VMA_MAX(newCapacity, m_Count);
4061 
4062  if((newCapacity < m_Capacity) && !freeMemory)
4063  {
4064  newCapacity = m_Capacity;
4065  }
4066 
4067  if(newCapacity != m_Capacity)
4068  {
4069  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4070  if(m_Count != 0)
4071  {
4072  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4073  }
4074  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4075  m_Capacity = newCapacity;
4076  m_pArray = newArray;
4077  }
4078  }
4079 
4080  void resize(size_t newCount, bool freeMemory = false)
4081  {
4082  size_t newCapacity = m_Capacity;
4083  if(newCount > m_Capacity)
4084  {
4085  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4086  }
4087  else if(freeMemory)
4088  {
4089  newCapacity = newCount;
4090  }
4091 
4092  if(newCapacity != m_Capacity)
4093  {
4094  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4095  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4096  if(elementsToCopy != 0)
4097  {
4098  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4099  }
4100  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4101  m_Capacity = newCapacity;
4102  m_pArray = newArray;
4103  }
4104 
4105  m_Count = newCount;
4106  }
4107 
4108  void clear(bool freeMemory = false)
4109  {
4110  resize(0, freeMemory);
4111  }
4112 
4113  void insert(size_t index, const T& src)
4114  {
4115  VMA_HEAVY_ASSERT(index <= m_Count);
4116  const size_t oldCount = size();
4117  resize(oldCount + 1);
4118  if(index < oldCount)
4119  {
4120  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4121  }
4122  m_pArray[index] = src;
4123  }
4124 
4125  void remove(size_t index)
4126  {
4127  VMA_HEAVY_ASSERT(index < m_Count);
4128  const size_t oldCount = size();
4129  if(index < oldCount - 1)
4130  {
4131  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4132  }
4133  resize(oldCount - 1);
4134  }
4135 
4136  void push_back(const T& src)
4137  {
4138  const size_t newIndex = size();
4139  resize(newIndex + 1);
4140  m_pArray[newIndex] = src;
4141  }
4142 
4143  void pop_back()
4144  {
4145  VMA_HEAVY_ASSERT(m_Count > 0);
4146  resize(size() - 1);
4147  }
4148 
4149  void push_front(const T& src)
4150  {
4151  insert(0, src);
4152  }
4153 
4154  void pop_front()
4155  {
4156  VMA_HEAVY_ASSERT(m_Count > 0);
4157  remove(0);
4158  }
4159 
4160  typedef T* iterator;
4161 
4162  iterator begin() { return m_pArray; }
4163  iterator end() { return m_pArray + m_Count; }
4164 
4165 private:
4166  AllocatorT m_Allocator;
4167  T* m_pArray;
4168  size_t m_Count;
4169  size_t m_Capacity;
4170 };
4171 
4172 template<typename T, typename allocatorT>
4173 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4174 {
4175  vec.insert(index, item);
4176 }
4177 
4178 template<typename T, typename allocatorT>
4179 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4180 {
4181  vec.remove(index);
4182 }
4183 
4184 #endif // #if VMA_USE_STL_VECTOR
4185 
4186 template<typename CmpLess, typename VectorT>
4187 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4188 {
4189  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4190  vector.data(),
4191  vector.data() + vector.size(),
4192  value,
4193  CmpLess()) - vector.data();
4194  VmaVectorInsert(vector, indexToInsert, value);
4195  return indexToInsert;
4196 }
4197 
4198 template<typename CmpLess, typename VectorT>
4199 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4200 {
4201  CmpLess comparator;
4202  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4203  vector.begin(),
4204  vector.end(),
4205  value,
4206  comparator);
4207  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4208  {
4209  size_t indexToRemove = it - vector.begin();
4210  VmaVectorRemove(vector, indexToRemove);
4211  return true;
4212  }
4213  return false;
4214 }
4215 
4216 template<typename CmpLess, typename IterT, typename KeyT>
4217 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4218 {
4219  CmpLess comparator;
4220  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4221  beg, end, value, comparator);
4222  if(it == end ||
4223  (!comparator(*it, value) && !comparator(value, *it)))
4224  {
4225  return it;
4226  }
4227  return end;
4228 }
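/*
Usage sketch (illustrative only): keeping a vector sorted with the helpers
above, assuming v is a VmaVector<int, ...> as defined earlier.

    struct CmpLess { bool operator()(int a, int b) const { return a < b; } };
    size_t pos = VmaVectorInsertSorted<CmpLess>(v, 5);      // insert keeping order
    bool removed = VmaVectorRemoveSorted<CmpLess>(v, 5);    // true if 5 was found and removed
*/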
4229 
4231 // class VmaPoolAllocator
4232 
4233 /*
4234 Allocator for objects of type T using a list of arrays (pools) to speed up
4235 allocation. The number of elements that can be allocated is not bounded, because
4236 the allocator can create multiple blocks.
4237 */
4238 template<typename T>
4239 class VmaPoolAllocator
4240 {
4241  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4242 public:
4243  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
4244  ~VmaPoolAllocator();
4245  void Clear();
4246  T* Alloc();
4247  void Free(T* ptr);
4248 
4249 private:
4250  union Item
4251  {
4252  uint32_t NextFreeIndex;
4253  T Value;
4254  };
4255 
4256  struct ItemBlock
4257  {
4258  Item* pItems;
4259  uint32_t Capacity;
4260  uint32_t FirstFreeIndex;
4261  };
4262 
4263  const VkAllocationCallbacks* m_pAllocationCallbacks;
4264  const uint32_t m_FirstBlockCapacity;
4265  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4266 
4267  ItemBlock& CreateNewBlock();
4268 };
4269 
4270 template<typename T>
4271 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
4272  m_pAllocationCallbacks(pAllocationCallbacks),
4273  m_FirstBlockCapacity(firstBlockCapacity),
4274  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4275 {
4276  VMA_ASSERT(m_FirstBlockCapacity > 1);
4277 }
4278 
4279 template<typename T>
4280 VmaPoolAllocator<T>::~VmaPoolAllocator()
4281 {
4282  Clear();
4283 }
4284 
4285 template<typename T>
4286 void VmaPoolAllocator<T>::Clear()
4287 {
4288  for(size_t i = m_ItemBlocks.size(); i--; )
4289  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4290  m_ItemBlocks.clear();
4291 }
4292 
4293 template<typename T>
4294 T* VmaPoolAllocator<T>::Alloc()
4295 {
4296  for(size_t i = m_ItemBlocks.size(); i--; )
4297  {
4298  ItemBlock& block = m_ItemBlocks[i];
4299  // This block has some free items: Use the first one.
4300  if(block.FirstFreeIndex != UINT32_MAX)
4301  {
4302  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4303  block.FirstFreeIndex = pItem->NextFreeIndex;
4304  return &pItem->Value;
4305  }
4306  }
4307 
4308  // No block has a free item: Create a new one and use it.
4309  ItemBlock& newBlock = CreateNewBlock();
4310  Item* const pItem = &newBlock.pItems[0];
4311  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4312  return &pItem->Value;
4313 }
4314 
4315 template<typename T>
4316 void VmaPoolAllocator<T>::Free(T* ptr)
4317 {
4318  // Search all memory blocks to find ptr.
4319  for(size_t i = m_ItemBlocks.size(); i--; )
4320  {
4321  ItemBlock& block = m_ItemBlocks[i];
4322 
4323  // Casting to union.
4324  Item* pItemPtr;
4325  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4326 
4327  // Check if pItemPtr is in address range of this block.
4328  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4329  {
4330  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4331  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4332  block.FirstFreeIndex = index;
4333  return;
4334  }
4335  }
4336  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4337 }
4338 
4339 template<typename T>
4340 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4341 {
4342  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
4343  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
4344 
4345  const ItemBlock newBlock = {
4346  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
4347  newBlockCapacity,
4348  0 };
4349 
4350  m_ItemBlocks.push_back(newBlock);
4351 
4352  // Setup singly-linked list of all free items in this block.
4353  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
4354  newBlock.pItems[i].NextFreeIndex = i + 1;
4355  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
4356  return m_ItemBlocks.back();
4357 }
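/*
Worked example (illustrative): with m_FirstBlockCapacity = 4, CreateNewBlock()
links the items into the free list 0 -> 1 -> 2 -> 3 -> UINT32_MAX with
FirstFreeIndex = 0. Alloc() pops the head of that list in O(1); Free() pushes
the freed item back as the new head. Each subsequent block grows by a factor
of 3/2 over the previous one.
*/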
4358 
4360 // class VmaRawList, VmaList
4361 
4362 #if VMA_USE_STL_LIST
4363 
4364 #define VmaList std::list
4365 
4366 #else // #if VMA_USE_STL_LIST
4367 
4368 template<typename T>
4369 struct VmaListItem
4370 {
4371  VmaListItem* pPrev;
4372  VmaListItem* pNext;
4373  T Value;
4374 };
4375 
4376 // Doubly linked list.
4377 template<typename T>
4378 class VmaRawList
4379 {
4380  VMA_CLASS_NO_COPY(VmaRawList)
4381 public:
4382  typedef VmaListItem<T> ItemType;
4383 
4384  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4385  ~VmaRawList();
4386  void Clear();
4387 
4388  size_t GetCount() const { return m_Count; }
4389  bool IsEmpty() const { return m_Count == 0; }
4390 
4391  ItemType* Front() { return m_pFront; }
4392  const ItemType* Front() const { return m_pFront; }
4393  ItemType* Back() { return m_pBack; }
4394  const ItemType* Back() const { return m_pBack; }
4395 
4396  ItemType* PushBack();
4397  ItemType* PushFront();
4398  ItemType* PushBack(const T& value);
4399  ItemType* PushFront(const T& value);
4400  void PopBack();
4401  void PopFront();
4402 
4403  // Item can be null - it means PushBack.
4404  ItemType* InsertBefore(ItemType* pItem);
4405  // Item can be null - it means PushFront.
4406  ItemType* InsertAfter(ItemType* pItem);
4407 
4408  ItemType* InsertBefore(ItemType* pItem, const T& value);
4409  ItemType* InsertAfter(ItemType* pItem, const T& value);
4410 
4411  void Remove(ItemType* pItem);
4412 
4413 private:
4414  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4415  VmaPoolAllocator<ItemType> m_ItemAllocator;
4416  ItemType* m_pFront;
4417  ItemType* m_pBack;
4418  size_t m_Count;
4419 };
4420 
4421 template<typename T>
4422 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4423  m_pAllocationCallbacks(pAllocationCallbacks),
4424  m_ItemAllocator(pAllocationCallbacks, 128),
4425  m_pFront(VMA_NULL),
4426  m_pBack(VMA_NULL),
4427  m_Count(0)
4428 {
4429 }
4430 
4431 template<typename T>
4432 VmaRawList<T>::~VmaRawList()
4433 {
4434  // Intentionally not calling Clear, because that would spend unnecessary
4435  // computation returning all items to m_ItemAllocator as free.
4436 }
4437 
4438 template<typename T>
4439 void VmaRawList<T>::Clear()
4440 {
4441  if(IsEmpty() == false)
4442  {
4443  ItemType* pItem = m_pBack;
4444  while(pItem != VMA_NULL)
4445  {
4446  ItemType* const pPrevItem = pItem->pPrev;
4447  m_ItemAllocator.Free(pItem);
4448  pItem = pPrevItem;
4449  }
4450  m_pFront = VMA_NULL;
4451  m_pBack = VMA_NULL;
4452  m_Count = 0;
4453  }
4454 }
4455 
4456 template<typename T>
4457 VmaListItem<T>* VmaRawList<T>::PushBack()
4458 {
4459  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4460  pNewItem->pNext = VMA_NULL;
4461  if(IsEmpty())
4462  {
4463  pNewItem->pPrev = VMA_NULL;
4464  m_pFront = pNewItem;
4465  m_pBack = pNewItem;
4466  m_Count = 1;
4467  }
4468  else
4469  {
4470  pNewItem->pPrev = m_pBack;
4471  m_pBack->pNext = pNewItem;
4472  m_pBack = pNewItem;
4473  ++m_Count;
4474  }
4475  return pNewItem;
4476 }
4477 
4478 template<typename T>
4479 VmaListItem<T>* VmaRawList<T>::PushFront()
4480 {
4481  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4482  pNewItem->pPrev = VMA_NULL;
4483  if(IsEmpty())
4484  {
4485  pNewItem->pNext = VMA_NULL;
4486  m_pFront = pNewItem;
4487  m_pBack = pNewItem;
4488  m_Count = 1;
4489  }
4490  else
4491  {
4492  pNewItem->pNext = m_pFront;
4493  m_pFront->pPrev = pNewItem;
4494  m_pFront = pNewItem;
4495  ++m_Count;
4496  }
4497  return pNewItem;
4498 }
4499 
4500 template<typename T>
4501 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4502 {
4503  ItemType* const pNewItem = PushBack();
4504  pNewItem->Value = value;
4505  return pNewItem;
4506 }
4507 
4508 template<typename T>
4509 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4510 {
4511  ItemType* const pNewItem = PushFront();
4512  pNewItem->Value = value;
4513  return pNewItem;
4514 }
4515 
4516 template<typename T>
4517 void VmaRawList<T>::PopBack()
4518 {
4519  VMA_HEAVY_ASSERT(m_Count > 0);
4520  ItemType* const pBackItem = m_pBack;
4521  ItemType* const pPrevItem = pBackItem->pPrev;
4522  if(pPrevItem != VMA_NULL)
4523  {
4524  pPrevItem->pNext = VMA_NULL;
4525  }
4526  m_pBack = pPrevItem;
4527  m_ItemAllocator.Free(pBackItem);
4528  --m_Count;
4529 }
4530 
4531 template<typename T>
4532 void VmaRawList<T>::PopFront()
4533 {
4534  VMA_HEAVY_ASSERT(m_Count > 0);
4535  ItemType* const pFrontItem = m_pFront;
4536  ItemType* const pNextItem = pFrontItem->pNext;
4537  if(pNextItem != VMA_NULL)
4538  {
4539  pNextItem->pPrev = VMA_NULL;
4540  }
4541  m_pFront = pNextItem;
4542  m_ItemAllocator.Free(pFrontItem);
4543  --m_Count;
4544 }
4545 
4546 template<typename T>
4547 void VmaRawList<T>::Remove(ItemType* pItem)
4548 {
4549  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4550  VMA_HEAVY_ASSERT(m_Count > 0);
4551 
4552  if(pItem->pPrev != VMA_NULL)
4553  {
4554  pItem->pPrev->pNext = pItem->pNext;
4555  }
4556  else
4557  {
4558  VMA_HEAVY_ASSERT(m_pFront == pItem);
4559  m_pFront = pItem->pNext;
4560  }
4561 
4562  if(pItem->pNext != VMA_NULL)
4563  {
4564  pItem->pNext->pPrev = pItem->pPrev;
4565  }
4566  else
4567  {
4568  VMA_HEAVY_ASSERT(m_pBack == pItem);
4569  m_pBack = pItem->pPrev;
4570  }
4571 
4572  m_ItemAllocator.Free(pItem);
4573  --m_Count;
4574 }
4575 
4576 template<typename T>
4577 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4578 {
4579  if(pItem != VMA_NULL)
4580  {
4581  ItemType* const prevItem = pItem->pPrev;
4582  ItemType* const newItem = m_ItemAllocator.Alloc();
4583  newItem->pPrev = prevItem;
4584  newItem->pNext = pItem;
4585  pItem->pPrev = newItem;
4586  if(prevItem != VMA_NULL)
4587  {
4588  prevItem->pNext = newItem;
4589  }
4590  else
4591  {
4592  VMA_HEAVY_ASSERT(m_pFront == pItem);
4593  m_pFront = newItem;
4594  }
4595  ++m_Count;
4596  return newItem;
4597  }
4598  else
4599  return PushBack();
4600 }
4601 
4602 template<typename T>
4603 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4604 {
4605  if(pItem != VMA_NULL)
4606  {
4607  ItemType* const nextItem = pItem->pNext;
4608  ItemType* const newItem = m_ItemAllocator.Alloc();
4609  newItem->pNext = nextItem;
4610  newItem->pPrev = pItem;
4611  pItem->pNext = newItem;
4612  if(nextItem != VMA_NULL)
4613  {
4614  nextItem->pPrev = newItem;
4615  }
4616  else
4617  {
4618  VMA_HEAVY_ASSERT(m_pBack == pItem);
4619  m_pBack = newItem;
4620  }
4621  ++m_Count;
4622  return newItem;
4623  }
4624  else
4625  return PushFront();
4626 }
4627 
4628 template<typename T>
4629 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4630 {
4631  ItemType* const newItem = InsertBefore(pItem);
4632  newItem->Value = value;
4633  return newItem;
4634 }
4635 
4636 template<typename T>
4637 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4638 {
4639  ItemType* const newItem = InsertAfter(pItem);
4640  newItem->Value = value;
4641  return newItem;
4642 }
4643 
4644 template<typename T, typename AllocatorT>
4645 class VmaList
4646 {
4647  VMA_CLASS_NO_COPY(VmaList)
4648 public:
4649  class iterator
4650  {
4651  public:
4652  iterator() :
4653  m_pList(VMA_NULL),
4654  m_pItem(VMA_NULL)
4655  {
4656  }
4657 
4658  T& operator*() const
4659  {
4660  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4661  return m_pItem->Value;
4662  }
4663  T* operator->() const
4664  {
4665  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4666  return &m_pItem->Value;
4667  }
4668 
4669  iterator& operator++()
4670  {
4671  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4672  m_pItem = m_pItem->pNext;
4673  return *this;
4674  }
4675  iterator& operator--()
4676  {
4677  if(m_pItem != VMA_NULL)
4678  {
4679  m_pItem = m_pItem->pPrev;
4680  }
4681  else
4682  {
4683  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4684  m_pItem = m_pList->Back();
4685  }
4686  return *this;
4687  }
4688 
4689  iterator operator++(int)
4690  {
4691  iterator result = *this;
4692  ++*this;
4693  return result;
4694  }
4695  iterator operator--(int)
4696  {
4697  iterator result = *this;
4698  --*this;
4699  return result;
4700  }
4701 
4702  bool operator==(const iterator& rhs) const
4703  {
4704  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4705  return m_pItem == rhs.m_pItem;
4706  }
4707  bool operator!=(const iterator& rhs) const
4708  {
4709  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4710  return m_pItem != rhs.m_pItem;
4711  }
4712 
4713  private:
4714  VmaRawList<T>* m_pList;
4715  VmaListItem<T>* m_pItem;
4716 
4717  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4718  m_pList(pList),
4719  m_pItem(pItem)
4720  {
4721  }
4722 
4723  friend class VmaList<T, AllocatorT>;
4724  };
4725 
4726  class const_iterator
4727  {
4728  public:
4729  const_iterator() :
4730  m_pList(VMA_NULL),
4731  m_pItem(VMA_NULL)
4732  {
4733  }
4734 
4735  const_iterator(const iterator& src) :
4736  m_pList(src.m_pList),
4737  m_pItem(src.m_pItem)
4738  {
4739  }
4740 
4741  const T& operator*() const
4742  {
4743  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4744  return m_pItem->Value;
4745  }
4746  const T* operator->() const
4747  {
4748  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4749  return &m_pItem->Value;
4750  }
4751 
4752  const_iterator& operator++()
4753  {
4754  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4755  m_pItem = m_pItem->pNext;
4756  return *this;
4757  }
4758  const_iterator& operator--()
4759  {
4760  if(m_pItem != VMA_NULL)
4761  {
4762  m_pItem = m_pItem->pPrev;
4763  }
4764  else
4765  {
4766  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4767  m_pItem = m_pList->Back();
4768  }
4769  return *this;
4770  }
4771 
4772  const_iterator operator++(int)
4773  {
4774  const_iterator result = *this;
4775  ++*this;
4776  return result;
4777  }
4778  const_iterator operator--(int)
4779  {
4780  const_iterator result = *this;
4781  --*this;
4782  return result;
4783  }
4784 
4785  bool operator==(const const_iterator& rhs) const
4786  {
4787  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4788  return m_pItem == rhs.m_pItem;
4789  }
4790  bool operator!=(const const_iterator& rhs) const
4791  {
4792  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4793  return m_pItem != rhs.m_pItem;
4794  }
4795 
4796  private:
4797  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4798  m_pList(pList),
4799  m_pItem(pItem)
4800  {
4801  }
4802 
4803  const VmaRawList<T>* m_pList;
4804  const VmaListItem<T>* m_pItem;
4805 
4806  friend class VmaList<T, AllocatorT>;
4807  };
4808 
4809  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4810 
4811  bool empty() const { return m_RawList.IsEmpty(); }
4812  size_t size() const { return m_RawList.GetCount(); }
4813 
4814  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4815  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4816 
4817  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4818  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4819 
4820  void clear() { m_RawList.Clear(); }
4821  void push_back(const T& value) { m_RawList.PushBack(value); }
4822  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4823  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4824 
4825 private:
4826  VmaRawList<T> m_RawList;
4827 };
4828 
4829 #endif // #if VMA_USE_STL_LIST
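// Usage sketch (illustrative; pAllocationCallbacks is assumed to be a valid
// const VkAllocationCallbacks*):
//
//     VmaStlAllocator<int> alloc(pAllocationCallbacks);
//     VmaList< int, VmaStlAllocator<int> > list(alloc);
//     list.push_back(42);
//     for(VmaList< int, VmaStlAllocator<int> >::iterator it = list.begin();
//         it != list.end(); ++it)
//     {
//         // *it == 42
//     }
//
// erase() and insert() take iterators into this same list, mirroring the
// subset of the std::list interface implemented above.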
4830 
4831 ////////////////////////////////////////////////////////////////////////////////
4832 // class VmaMap
4833 
4834 // Unused in this version.
4835 #if 0
4836 
4837 #if VMA_USE_STL_UNORDERED_MAP
4838 
4839 #define VmaPair std::pair
4840 
4841 #define VMA_MAP_TYPE(KeyT, ValueT) \
4842  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4843 
4844 #else // #if VMA_USE_STL_UNORDERED_MAP
4845 
4846 template<typename T1, typename T2>
4847 struct VmaPair
4848 {
4849  T1 first;
4850  T2 second;
4851 
4852  VmaPair() : first(), second() { }
4853  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4854 };
4855 
4856 /* Class compatible with a subset of the interface of std::unordered_map.
4857 KeyT, ValueT must be POD because they will be stored in VmaVector.
4858 */
4859 template<typename KeyT, typename ValueT>
4860 class VmaMap
4861 {
4862 public:
4863  typedef VmaPair<KeyT, ValueT> PairType;
4864  typedef PairType* iterator;
4865 
4866  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4867 
4868  iterator begin() { return m_Vector.begin(); }
4869  iterator end() { return m_Vector.end(); }
4870 
4871  void insert(const PairType& pair);
4872  iterator find(const KeyT& key);
4873  void erase(iterator it);
4874 
4875 private:
4876  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4877 };
4878 
4879 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4880 
4881 template<typename FirstT, typename SecondT>
4882 struct VmaPairFirstLess
4883 {
4884  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4885  {
4886  return lhs.first < rhs.first;
4887  }
4888  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4889  {
4890  return lhs.first < rhsFirst;
4891  }
4892 };
4893 
4894 template<typename KeyT, typename ValueT>
4895 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4896 {
4897  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4898  m_Vector.data(),
4899  m_Vector.data() + m_Vector.size(),
4900  pair,
4901  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4902  VmaVectorInsert(m_Vector, indexToInsert, pair);
4903 }
4904 
4905 template<typename KeyT, typename ValueT>
4906 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4907 {
4908  PairType* it = VmaBinaryFindFirstNotLess(
4909  m_Vector.data(),
4910  m_Vector.data() + m_Vector.size(),
4911  key,
4912  VmaPairFirstLess<KeyT, ValueT>());
4913  if((it != m_Vector.end()) && (it->first == key))
4914  {
4915  return it;
4916  }
4917  else
4918  {
4919  return m_Vector.end();
4920  }
4921 }
4922 
4923 template<typename KeyT, typename ValueT>
4924 void VmaMap<KeyT, ValueT>::erase(iterator it)
4925 {
4926  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4927 }
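// Note (illustrative; this whole section is compiled out, see the surrounding
// #if 0): m_Vector is kept sorted by pair.first, so find() is an O(log n)
// binary search and insert() is a binary search plus an O(n) element shift.
// E.g. inserting keys 3, 1, 2 leaves the vector ordered 1, 2, 3.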
4928 
4929 #endif // #if VMA_USE_STL_UNORDERED_MAP
4930 
4931 #endif // #if 0
4932 
4933 ////////////////////////////////////////////////////////////////////////////////
4934 
4935 class VmaDeviceMemoryBlock;
4936 
4937 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4938 
4939 struct VmaAllocation_T
4940 {
4941 private:
4942  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4943 
4944  enum FLAGS
4945  {
4946  FLAG_USER_DATA_STRING = 0x01,
4947  };
4948 
4949 public:
4950  enum ALLOCATION_TYPE
4951  {
4952  ALLOCATION_TYPE_NONE,
4953  ALLOCATION_TYPE_BLOCK,
4954  ALLOCATION_TYPE_DEDICATED,
4955  };
4956 
4957  /*
4958  This struct cannot have a constructor or destructor. It must stay POD because
4959  it is allocated using VmaPoolAllocator.
4960  */
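 // Lifecycle sketch (illustrative; poolAllocator is assumed to be a
 // VmaPoolAllocator<VmaAllocation_T>): because the struct is POD, the owner
 // drives construction and destruction manually:
 //
 //     VmaAllocation_T* alloc = poolAllocator.Alloc(); // raw POD storage
 //     alloc->Ctor(currentFrameIndex, false);          // manual "constructor"
 //     // ... use the allocation ...
 //     alloc->Dtor();                                  // manual "destructor"
 //     poolAllocator.Free(alloc);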
4961 
4962  void Ctor(uint32_t currentFrameIndex, bool userDataString)
4963  {
4964  m_Alignment = 1;
4965  m_Size = 0;
4966  m_pUserData = VMA_NULL;
4967  m_LastUseFrameIndex = currentFrameIndex;
4968  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
4969  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
4970  m_MapCount = 0;
4971  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
4972 
4973 #if VMA_STATS_STRING_ENABLED
4974  m_CreationFrameIndex = currentFrameIndex;
4975  m_BufferImageUsage = 0;
4976 #endif
4977  }
4978 
4979  void Dtor()
4980  {
4981  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4982 
4983  // Check if owned string was freed.
4984  VMA_ASSERT(m_pUserData == VMA_NULL);
4985  }
4986 
4987  void InitBlockAllocation(
4988  VmaDeviceMemoryBlock* block,
4989  VkDeviceSize offset,
4990  VkDeviceSize alignment,
4991  VkDeviceSize size,
4992  VmaSuballocationType suballocationType,
4993  bool mapped,
4994  bool canBecomeLost)
4995  {
4996  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4997  VMA_ASSERT(block != VMA_NULL);
4998  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4999  m_Alignment = alignment;
5000  m_Size = size;
5001  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5002  m_SuballocationType = (uint8_t)suballocationType;
5003  m_BlockAllocation.m_Block = block;
5004  m_BlockAllocation.m_Offset = offset;
5005  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5006  }
5007 
5008  void InitLost()
5009  {
5010  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5011  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5012  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5013  m_BlockAllocation.m_Block = VMA_NULL;
5014  m_BlockAllocation.m_Offset = 0;
5015  m_BlockAllocation.m_CanBecomeLost = true;
5016  }
5017 
5018  void ChangeBlockAllocation(
5019  VmaAllocator hAllocator,
5020  VmaDeviceMemoryBlock* block,
5021  VkDeviceSize offset);
5022 
5023  void ChangeSize(VkDeviceSize newSize);
5024  void ChangeOffset(VkDeviceSize newOffset);
5025 
5026  // A non-null pMappedData means the allocation was created with the MAPPED flag.
5027  void InitDedicatedAllocation(
5028  uint32_t memoryTypeIndex,
5029  VkDeviceMemory hMemory,
5030  VmaSuballocationType suballocationType,
5031  void* pMappedData,
5032  VkDeviceSize size)
5033  {
5034  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5035  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5036  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5037  m_Alignment = 0;
5038  m_Size = size;
5039  m_SuballocationType = (uint8_t)suballocationType;
5040  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5041  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
5042  m_DedicatedAllocation.m_hMemory = hMemory;
5043  m_DedicatedAllocation.m_pMappedData = pMappedData;
5044  }
5045 
5046  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5047  VkDeviceSize GetAlignment() const { return m_Alignment; }
5048  VkDeviceSize GetSize() const { return m_Size; }
5049  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5050  void* GetUserData() const { return m_pUserData; }
5051  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5052  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5053 
5054  VmaDeviceMemoryBlock* GetBlock() const
5055  {
5056  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5057  return m_BlockAllocation.m_Block;
5058  }
5059  VkDeviceSize GetOffset() const;
5060  VkDeviceMemory GetMemory() const;
5061  uint32_t GetMemoryTypeIndex() const;
5062  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5063  void* GetMappedData() const;
5064  bool CanBecomeLost() const;
5065 
5066  uint32_t GetLastUseFrameIndex() const
5067  {
5068  return m_LastUseFrameIndex.load();
5069  }
5070  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5071  {
5072  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5073  }
5074  /*
5075  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5076  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5077  - Else, returns false.
5078 
5079  If hAllocation is already lost, assert - you should not call it then.
5080  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5081  */
5082  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5083 
5084  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5085  {
5086  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5087  outInfo.blockCount = 1;
5088  outInfo.allocationCount = 1;
5089  outInfo.unusedRangeCount = 0;
5090  outInfo.usedBytes = m_Size;
5091  outInfo.unusedBytes = 0;
5092  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5093  outInfo.unusedRangeSizeMin = UINT64_MAX;
5094  outInfo.unusedRangeSizeMax = 0;
5095  }
5096 
5097  void BlockAllocMap();
5098  void BlockAllocUnmap();
5099  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5100  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5101 
5102 #if VMA_STATS_STRING_ENABLED
5103  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5104  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5105 
5106  void InitBufferImageUsage(uint32_t bufferImageUsage)
5107  {
5108  VMA_ASSERT(m_BufferImageUsage == 0);
5109  m_BufferImageUsage = bufferImageUsage;
5110  }
5111 
5112  void PrintParameters(class VmaJsonWriter& json) const;
5113 #endif
5114 
5115 private:
5116  VkDeviceSize m_Alignment;
5117  VkDeviceSize m_Size;
5118  void* m_pUserData;
5119  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5120  uint8_t m_Type; // ALLOCATION_TYPE
5121  uint8_t m_SuballocationType; // VmaSuballocationType
5122  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5123  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5124  uint8_t m_MapCount;
5125  uint8_t m_Flags; // enum FLAGS
5126 
5127  // Allocation out of VmaDeviceMemoryBlock.
5128  struct BlockAllocation
5129  {
5130  VmaDeviceMemoryBlock* m_Block;
5131  VkDeviceSize m_Offset;
5132  bool m_CanBecomeLost;
5133  };
5134 
5135  // Allocation for an object that has its own private VkDeviceMemory.
5136  struct DedicatedAllocation
5137  {
5138  uint32_t m_MemoryTypeIndex;
5139  VkDeviceMemory m_hMemory;
5140  void* m_pMappedData; // Not null means memory is mapped.
5141  };
5142 
5143  union
5144  {
5145  // Allocation out of VmaDeviceMemoryBlock.
5146  BlockAllocation m_BlockAllocation;
5147  // Allocation for an object that has its own private VkDeviceMemory.
5148  DedicatedAllocation m_DedicatedAllocation;
5149  };
5150 
5151 #if VMA_STATS_STRING_ENABLED
5152  uint32_t m_CreationFrameIndex;
5153  uint32_t m_BufferImageUsage; // 0 if unknown.
5154 #endif
5155 
5156  void FreeUserDataString(VmaAllocator hAllocator);
5157 };
5158 
5159 /*
5160 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
5161 allocation and returned to the user, or free.
5162 */
5163 struct VmaSuballocation
5164 {
5165  VkDeviceSize offset;
5166  VkDeviceSize size;
5167  VmaAllocation hAllocation;
5168  VmaSuballocationType type;
5169 };
5170 
5171 // Comparator for offsets.
5172 struct VmaSuballocationOffsetLess
5173 {
5174  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5175  {
5176  return lhs.offset < rhs.offset;
5177  }
5178 };
5179 struct VmaSuballocationOffsetGreater
5180 {
5181  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5182  {
5183  return lhs.offset > rhs.offset;
5184  }
5185 };
5186 
5187 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5188 
5189 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
5190 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5191 
5192 enum class VmaAllocationRequestType
5193 {
5194  Normal,
5195  // Used by "Linear" algorithm.
5196  UpperAddress,
5197  EndOf1st,
5198  EndOf2nd,
5199 };
5200 
5201 /*
5202 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5203 
5204 If canMakeOtherLost was false:
5205 - item points to a FREE suballocation.
5206 - itemsToMakeLostCount is 0.
5207 
5208 If canMakeOtherLost was true:
5209 - item points to first of sequence of suballocations, which are either FREE,
5210  or point to VmaAllocations that can become lost.
5211 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5212  the requested allocation to succeed.
5213 */
5214 struct VmaAllocationRequest
5215 {
5216  VkDeviceSize offset;
5217  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5218  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5219  VmaSuballocationList::iterator item;
5220  size_t itemsToMakeLostCount;
5221  void* customData;
5222  VmaAllocationRequestType type;
5223 
5224  VkDeviceSize CalcCost() const
5225  {
5226  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5227  }
5228 };
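// Example (illustrative): for a request with sumItemSize = 262144 (256 KiB)
// and itemsToMakeLostCount = 2, CalcCost() = 262144 + 2 * 1048576 = 2359296,
// i.e. each allocation that would be made lost is penalized like an extra
// 1 MiB of occupied space when comparing candidate placements.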
5229 
5230 /*
5231 Data structure used for bookkeeping of allocations and unused ranges of memory
5232 in a single VkDeviceMemory block.
5233 */
5234 class VmaBlockMetadata
5235 {
5236 public:
5237  VmaBlockMetadata(VmaAllocator hAllocator);
5238  virtual ~VmaBlockMetadata() { }
5239  virtual void Init(VkDeviceSize size) { m_Size = size; }
5240 
5241  // Validates all data structures inside this object. If not valid, returns false.
5242  virtual bool Validate() const = 0;
5243  VkDeviceSize GetSize() const { return m_Size; }
5244  virtual size_t GetAllocationCount() const = 0;
5245  virtual VkDeviceSize GetSumFreeSize() const = 0;
5246  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5247  // Returns true if this block is empty - contains only a single free suballocation.
5248  virtual bool IsEmpty() const = 0;
5249 
5250  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5251  // Shouldn't modify blockCount.
5252  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5253 
5254 #if VMA_STATS_STRING_ENABLED
5255  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5256 #endif
5257 
5258  // Tries to find a place for suballocation with given parameters inside this block.
5259  // If succeeded, fills pAllocationRequest and returns true.
5260  // If failed, returns false.
5261  virtual bool CreateAllocationRequest(
5262  uint32_t currentFrameIndex,
5263  uint32_t frameInUseCount,
5264  VkDeviceSize bufferImageGranularity,
5265  VkDeviceSize allocSize,
5266  VkDeviceSize allocAlignment,
5267  bool upperAddress,
5268  VmaSuballocationType allocType,
5269  bool canMakeOtherLost,
5270  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5271  uint32_t strategy,
5272  VmaAllocationRequest* pAllocationRequest) = 0;
5273 
5274  virtual bool MakeRequestedAllocationsLost(
5275  uint32_t currentFrameIndex,
5276  uint32_t frameInUseCount,
5277  VmaAllocationRequest* pAllocationRequest) = 0;
5278 
5279  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5280 
5281  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5282 
5283  // Makes actual allocation based on request. Request must already be checked and valid.
5284  virtual void Alloc(
5285  const VmaAllocationRequest& request,
5286  VmaSuballocationType type,
5287  VkDeviceSize allocSize,
5288  VmaAllocation hAllocation) = 0;
5289 
5290  // Frees suballocation assigned to given memory region.
5291  virtual void Free(const VmaAllocation allocation) = 0;
5292  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5293 
5294  // Tries to resize (grow or shrink) space for given allocation, in place.
5295  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5296 
5297 protected:
5298  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5299 
5300 #if VMA_STATS_STRING_ENABLED
5301  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5302  VkDeviceSize unusedBytes,
5303  size_t allocationCount,
5304  size_t unusedRangeCount) const;
5305  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5306  VkDeviceSize offset,
5307  VmaAllocation hAllocation) const;
5308  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5309  VkDeviceSize offset,
5310  VkDeviceSize size) const;
5311  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5312 #endif
5313 
5314 private:
5315  VkDeviceSize m_Size;
5316  const VkAllocationCallbacks* m_pAllocationCallbacks;
5317 };
5318 
5319 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5320  VMA_ASSERT(0 && "Validation failed: " #cond); \
5321  return false; \
5322  } } while(false)
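// Usage sketch (illustrative): inside some Validate() implementation, a check
// such as
//
//     VMA_VALIDATE(m_FreeCount <= m_Suballocations.size());
//
// triggers VMA_ASSERT in debug builds and makes the enclosing function return
// false when the condition does not hold.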
5323 
5324 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5325 {
5326  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5327 public:
5328  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5329  virtual ~VmaBlockMetadata_Generic();
5330  virtual void Init(VkDeviceSize size);
5331 
5332  virtual bool Validate() const;
5333  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5334  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5335  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5336  virtual bool IsEmpty() const;
5337 
5338  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5339  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5340 
5341 #if VMA_STATS_STRING_ENABLED
5342  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5343 #endif
5344 
5345  virtual bool CreateAllocationRequest(
5346  uint32_t currentFrameIndex,
5347  uint32_t frameInUseCount,
5348  VkDeviceSize bufferImageGranularity,
5349  VkDeviceSize allocSize,
5350  VkDeviceSize allocAlignment,
5351  bool upperAddress,
5352  VmaSuballocationType allocType,
5353  bool canMakeOtherLost,
5354  uint32_t strategy,
5355  VmaAllocationRequest* pAllocationRequest);
5356 
5357  virtual bool MakeRequestedAllocationsLost(
5358  uint32_t currentFrameIndex,
5359  uint32_t frameInUseCount,
5360  VmaAllocationRequest* pAllocationRequest);
5361 
5362  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5363 
5364  virtual VkResult CheckCorruption(const void* pBlockData);
5365 
5366  virtual void Alloc(
5367  const VmaAllocationRequest& request,
5368  VmaSuballocationType type,
5369  VkDeviceSize allocSize,
5370  VmaAllocation hAllocation);
5371 
5372  virtual void Free(const VmaAllocation allocation);
5373  virtual void FreeAtOffset(VkDeviceSize offset);
5374 
5375  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5376 
5377  ////////////////////////////////////////////////////////////////////////////////
5378  // For defragmentation
5379 
5380  bool IsBufferImageGranularityConflictPossible(
5381  VkDeviceSize bufferImageGranularity,
5382  VmaSuballocationType& inOutPrevSuballocType) const;
5383 
5384 private:
5385  friend class VmaDefragmentationAlgorithm_Generic;
5386  friend class VmaDefragmentationAlgorithm_Fast;
5387 
5388  uint32_t m_FreeCount;
5389  VkDeviceSize m_SumFreeSize;
5390  VmaSuballocationList m_Suballocations;
5391  // Suballocations that are free and have size greater than a certain threshold.
5392  // Sorted by size, ascending.
5393  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5394 
5395  bool ValidateFreeSuballocationList() const;
5396 
5397  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
5398  // If yes, fills pOffset and returns true. If no, returns false.
5399  bool CheckAllocation(
5400  uint32_t currentFrameIndex,
5401  uint32_t frameInUseCount,
5402  VkDeviceSize bufferImageGranularity,
5403  VkDeviceSize allocSize,
5404  VkDeviceSize allocAlignment,
5405  VmaSuballocationType allocType,
5406  VmaSuballocationList::const_iterator suballocItem,
5407  bool canMakeOtherLost,
5408  VkDeviceSize* pOffset,
5409  size_t* itemsToMakeLostCount,
5410  VkDeviceSize* pSumFreeSize,
5411  VkDeviceSize* pSumItemSize) const;
5412  // Given a free suballocation, merges it with the following one, which must also be free.
5413  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5414  // Releases given suballocation, making it free.
5415  // Merges it with adjacent free suballocations if applicable.
5416  // Returns iterator to new free suballocation at this place.
5417  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5418  // Given a free suballocation, inserts it into the sorted list
5419  // m_FreeSuballocationsBySize if it is suitable.
5420  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5421  // Given a free suballocation, removes it from the sorted list
5422  // m_FreeSuballocationsBySize if it is suitable.
5423  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5424 };
5425 
5426 /*
5427 Allocations and their references in internal data structure look like this:
5428 
5429 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5430 
5431  0 +-------+
5432  | |
5433  | |
5434  | |
5435  +-------+
5436  | Alloc | 1st[m_1stNullItemsBeginCount]
5437  +-------+
5438  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5439  +-------+
5440  | ... |
5441  +-------+
5442  | Alloc | 1st[1st.size() - 1]
5443  +-------+
5444  | |
5445  | |
5446  | |
5447 GetSize() +-------+
5448 
5449 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5450 
5451  0 +-------+
5452  | Alloc | 2nd[0]
5453  +-------+
5454  | Alloc | 2nd[1]
5455  +-------+
5456  | ... |
5457  +-------+
5458  | Alloc | 2nd[2nd.size() - 1]
5459  +-------+
5460  | |
5461  | |
5462  | |
5463  +-------+
5464  | Alloc | 1st[m_1stNullItemsBeginCount]
5465  +-------+
5466  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5467  +-------+
5468  | ... |
5469  +-------+
5470  | Alloc | 1st[1st.size() - 1]
5471  +-------+
5472  | |
5473 GetSize() +-------+
5474 
5475 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5476 
5477  0 +-------+
5478  | |
5479  | |
5480  | |
5481  +-------+
5482  | Alloc | 1st[m_1stNullItemsBeginCount]
5483  +-------+
5484  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5485  +-------+
5486  | ... |
5487  +-------+
5488  | Alloc | 1st[1st.size() - 1]
5489  +-------+
5490  | |
5491  | |
5492  | |
5493  +-------+
5494  | Alloc | 2nd[2nd.size() - 1]
5495  +-------+
5496  | ... |
5497  +-------+
5498  | Alloc | 2nd[1]
5499  +-------+
5500  | Alloc | 2nd[0]
5501 GetSize() +-------+
5502 
5503 */
5504 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5505 {
5506  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5507 public:
5508  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5509  virtual ~VmaBlockMetadata_Linear();
5510  virtual void Init(VkDeviceSize size);
5511 
5512  virtual bool Validate() const;
5513  virtual size_t GetAllocationCount() const;
5514  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5515  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5516  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5517 
5518  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5519  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5520 
5521 #if VMA_STATS_STRING_ENABLED
5522  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5523 #endif
5524 
5525  virtual bool CreateAllocationRequest(
5526  uint32_t currentFrameIndex,
5527  uint32_t frameInUseCount,
5528  VkDeviceSize bufferImageGranularity,
5529  VkDeviceSize allocSize,
5530  VkDeviceSize allocAlignment,
5531  bool upperAddress,
5532  VmaSuballocationType allocType,
5533  bool canMakeOtherLost,
5534  uint32_t strategy,
5535  VmaAllocationRequest* pAllocationRequest);
5536 
5537  virtual bool MakeRequestedAllocationsLost(
5538  uint32_t currentFrameIndex,
5539  uint32_t frameInUseCount,
5540  VmaAllocationRequest* pAllocationRequest);
5541 
5542  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5543 
5544  virtual VkResult CheckCorruption(const void* pBlockData);
5545 
5546  virtual void Alloc(
5547  const VmaAllocationRequest& request,
5548  VmaSuballocationType type,
5549  VkDeviceSize allocSize,
5550  VmaAllocation hAllocation);
5551 
5552  virtual void Free(const VmaAllocation allocation);
5553  virtual void FreeAtOffset(VkDeviceSize offset);
5554 
5555 private:
5556  /*
5557  There are two suballocation vectors, used in a ping-pong fashion.
5558  The one with index m_1stVectorIndex is called 1st.
5559  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5560  2nd can be non-empty only when 1st is not empty.
5561  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5562  */
5563  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5564 
5565  enum SECOND_VECTOR_MODE
5566  {
5567  SECOND_VECTOR_EMPTY,
5568  /*
5569  Suballocations in 2nd vector are created later than the ones in 1st, but they
5570  all have smaller offsets.
5571  */
5572  SECOND_VECTOR_RING_BUFFER,
5573  /*
5574  Suballocations in 2nd vector are upper side of double stack.
5575  They all have offsets higher than those in 1st vector.
5576  Top of this stack means smaller offsets, but higher indices in this vector.
5577  */
5578  SECOND_VECTOR_DOUBLE_STACK,
5579  };
5580 
5581  VkDeviceSize m_SumFreeSize;
5582  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5583  uint32_t m_1stVectorIndex;
5584  SECOND_VECTOR_MODE m_2ndVectorMode;
5585 
5586  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5587  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5588  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5589  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5590 
5591  // Number of items in 1st vector with hAllocation = null at the beginning.
5592  size_t m_1stNullItemsBeginCount;
5593  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5594  size_t m_1stNullItemsMiddleCount;
5595  // Number of items in 2nd vector with hAllocation = null.
5596  size_t m_2ndNullItemsCount;
5597 
5598  bool ShouldCompact1st() const;
5599  void CleanupAfterFree();
5600 
5601  bool CreateAllocationRequest_LowerAddress(
5602  uint32_t currentFrameIndex,
5603  uint32_t frameInUseCount,
5604  VkDeviceSize bufferImageGranularity,
5605  VkDeviceSize allocSize,
5606  VkDeviceSize allocAlignment,
5607  VmaSuballocationType allocType,
5608  bool canMakeOtherLost,
5609  uint32_t strategy,
5610  VmaAllocationRequest* pAllocationRequest);
5611  bool CreateAllocationRequest_UpperAddress(
5612  uint32_t currentFrameIndex,
5613  uint32_t frameInUseCount,
5614  VkDeviceSize bufferImageGranularity,
5615  VkDeviceSize allocSize,
5616  VkDeviceSize allocAlignment,
5617  VmaSuballocationType allocType,
5618  bool canMakeOtherLost,
5619  uint32_t strategy,
5620  VmaAllocationRequest* pAllocationRequest);
5621 };
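// Usage sketch (illustrative; hLinearPool and bufCreateInfo are assumed to
// exist): the double-stack mode corresponds to a custom pool created with
// VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, where upper-stack allocations are
// requested with VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT:
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.pool = hLinearPool;
//     allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
//     VkBuffer buf; VmaAllocation alloc;
//     vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
//         &buf, &alloc, VMA_NULL);
//
// Such an allocation lands in the 2nd vector (SECOND_VECTOR_DOUBLE_STACK).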
5622 
5623 /*
5624 - GetSize() is the original size of allocated memory block.
5625 - m_UsableSize is this size aligned down to a power of two.
5626  All allocations and calculations happen relative to m_UsableSize.
5627 - GetUnusableSize() is the difference between them.
5628  It is reported as a separate, unused range, not available for allocations.
5629 
5630 Node at level 0 has size = m_UsableSize.
5631 Each successive level contains nodes half the size of those at the previous level.
5632 m_LevelCount is the maximum number of levels to use in the current object.
5633 */
5634 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5635 {
5636  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5637 public:
5638  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5639  virtual ~VmaBlockMetadata_Buddy();
5640  virtual void Init(VkDeviceSize size);
5641 
5642  virtual bool Validate() const;
5643  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5644  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5645  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5646  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5647 
5648  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5649  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5650 
5651 #if VMA_STATS_STRING_ENABLED
5652  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5653 #endif
5654 
5655  virtual bool CreateAllocationRequest(
5656  uint32_t currentFrameIndex,
5657  uint32_t frameInUseCount,
5658  VkDeviceSize bufferImageGranularity,
5659  VkDeviceSize allocSize,
5660  VkDeviceSize allocAlignment,
5661  bool upperAddress,
5662  VmaSuballocationType allocType,
5663  bool canMakeOtherLost,
5664  uint32_t strategy,
5665  VmaAllocationRequest* pAllocationRequest);
5666 
5667  virtual bool MakeRequestedAllocationsLost(
5668  uint32_t currentFrameIndex,
5669  uint32_t frameInUseCount,
5670  VmaAllocationRequest* pAllocationRequest);
5671 
5672  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5673 
5674  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5675 
5676  virtual void Alloc(
5677  const VmaAllocationRequest& request,
5678  VmaSuballocationType type,
5679  VkDeviceSize allocSize,
5680  VmaAllocation hAllocation);
5681 
5682  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5683  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5684 
5685 private:
5686  static const VkDeviceSize MIN_NODE_SIZE = 32;
5687  static const size_t MAX_LEVELS = 30;
5688 
5689  struct ValidationContext
5690  {
5691  size_t calculatedAllocationCount;
5692  size_t calculatedFreeCount;
5693  VkDeviceSize calculatedSumFreeSize;
5694 
5695  ValidationContext() :
5696  calculatedAllocationCount(0),
5697  calculatedFreeCount(0),
5698  calculatedSumFreeSize(0) { }
5699  };
5700 
5701  struct Node
5702  {
5703  VkDeviceSize offset;
5704  enum TYPE
5705  {
5706  TYPE_FREE,
5707  TYPE_ALLOCATION,
5708  TYPE_SPLIT,
5709  TYPE_COUNT
5710  } type;
5711  Node* parent;
5712  Node* buddy;
5713 
5714  union
5715  {
5716  struct
5717  {
5718  Node* prev;
5719  Node* next;
5720  } free;
5721  struct
5722  {
5723  VmaAllocation alloc;
5724  } allocation;
5725  struct
5726  {
5727  Node* leftChild;
5728  } split;
5729  };
5730  };
5731 
5732  // Size of the memory block aligned down to a power of two.
5733  VkDeviceSize m_UsableSize;
5734  uint32_t m_LevelCount;
5735 
5736  Node* m_Root;
5737  struct {
5738  Node* front;
5739  Node* back;
5740  } m_FreeList[MAX_LEVELS];
5741  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5742  size_t m_AllocationCount;
5743  // Number of nodes in the tree with type == TYPE_FREE.
5744  size_t m_FreeCount;
5745  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5746  VkDeviceSize m_SumFreeSize;
5747 
5748  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5749  void DeleteNode(Node* node);
5750  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5751  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5752  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5753  // Alloc passed just for validation. Can be null.
5754  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5755  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5756  // Adds node to the front of FreeList at given level.
5757  // node->type must be FREE.
5758  // node->free.prev, next can be undefined.
5759  void AddToFreeListFront(uint32_t level, Node* node);
5760  // Removes node from FreeList at given level.
5761  // node->type must be FREE.
5762  // node->free.prev, next stay untouched.
5763  void RemoveFromFreeList(uint32_t level, Node* node);
5764 
5765 #if VMA_STATS_STRING_ENABLED
5766  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5767 #endif
5768 };
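// Example (illustrative): for a block of GetSize() = 10 MiB, m_UsableSize is
// aligned down to 8 MiB and GetUnusableSize() = 2 MiB. Node sizes are then
// LevelToNodeSize(0) = 8 MiB, LevelToNodeSize(1) = 4 MiB, and so on down to
// MIN_NODE_SIZE; an allocation request picks the deepest level whose node
// size still fits allocSize (see AllocSizeToLevel).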
5769 
5770 /*
5771 Represents a single block of device memory (`VkDeviceMemory`) with all the
5772 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5773 
5774 Thread-safety: This class must be externally synchronized.
5775 */
5776 class VmaDeviceMemoryBlock
5777 {
5778  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5779 public:
5780  VmaBlockMetadata* m_pMetadata;
5781 
5782  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5783 
5784  ~VmaDeviceMemoryBlock()
5785  {
5786  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5787  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5788  }
5789 
5790  // Always call after construction.
5791  void Init(
5792  VmaAllocator hAllocator,
5793  VmaPool hParentPool,
5794  uint32_t newMemoryTypeIndex,
5795  VkDeviceMemory newMemory,
5796  VkDeviceSize newSize,
5797  uint32_t id,
5798  uint32_t algorithm);
5799  // Always call before destruction.
5800  void Destroy(VmaAllocator allocator);
5801 
5802  VmaPool GetParentPool() const { return m_hParentPool; }
5803  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5804  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5805  uint32_t GetId() const { return m_Id; }
5806  void* GetMappedData() const { return m_pMappedData; }
5807 
5808  // Validates all data structures inside this object. If not valid, returns false.
5809  bool Validate() const;
5810 
5811  VkResult CheckCorruption(VmaAllocator hAllocator);
5812 
5813  // ppData can be null.
5814  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5815  void Unmap(VmaAllocator hAllocator, uint32_t count);
5816 
5817  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5818  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5819 
5820  VkResult BindBufferMemory(
5821  const VmaAllocator hAllocator,
5822  const VmaAllocation hAllocation,
5823  VkBuffer hBuffer);
5824  VkResult BindImageMemory(
5825  const VmaAllocator hAllocator,
5826  const VmaAllocation hAllocation,
5827  VkImage hImage);
5828 
5829 private:
5830  VmaPool m_hParentPool; // VK_NULL_HANDLE if the block does not belong to a custom pool.
5831  uint32_t m_MemoryTypeIndex;
5832  uint32_t m_Id;
5833  VkDeviceMemory m_hMemory;
5834 
5835  /*
5836  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5837  Also protects m_MapCount, m_pMappedData.
5838  Allocations, deallocations, and any change to m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
5839  */
5840  VMA_MUTEX m_Mutex;
5841  uint32_t m_MapCount;
5842  void* m_pMappedData;
5843 };
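// Usage sketch (illustrative; the counts shown assume the usual
// reference-counting behavior of Map/Unmap): nested mapping of the same block
// is legal as long as the calls are balanced:
//
//     void* pData = VMA_NULL;
//     block.Map(hAllocator, 1, &pData);   // m_MapCount 0 -> 1, maps memory
//     block.Map(hAllocator, 1, &pData);   // m_MapCount 1 -> 2, reuses mapping
//     block.Unmap(hAllocator, 1);         // m_MapCount 2 -> 1
//     block.Unmap(hAllocator, 1);         // m_MapCount 1 -> 0, unmaps memory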
5844 
5845 struct VmaPointerLess
5846 {
5847  bool operator()(const void* lhs, const void* rhs) const
5848  {
5849  return lhs < rhs;
5850  }
5851 };
5852 
5853 struct VmaDefragmentationMove
5854 {
5855  size_t srcBlockIndex;
5856  size_t dstBlockIndex;
5857  VkDeviceSize srcOffset;
5858  VkDeviceSize dstOffset;
5859  VkDeviceSize size;
5860 };
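// Interpretation (illustrative): one move record means "copy `size` bytes
// from block[srcBlockIndex] at srcOffset to block[dstBlockIndex] at
// dstOffset". Moves are only recorded here; they are executed later by
// ApplyDefragmentationMovesCpu() (copies via mapped memory) or
// ApplyDefragmentationMovesGpu() (copies recorded into a VkCommandBuffer),
// declared further below in VmaBlockVector.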
5861 
5862 class VmaDefragmentationAlgorithm;
5863 
5864 /*
5865 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5866 Vulkan memory type.
5867 
5868 Synchronized internally with a mutex.
5869 */
5870 struct VmaBlockVector
5871 {
5872  VMA_CLASS_NO_COPY(VmaBlockVector)
5873 public:
5874  VmaBlockVector(
5875  VmaAllocator hAllocator,
5876  VmaPool hParentPool,
5877  uint32_t memoryTypeIndex,
5878  VkDeviceSize preferredBlockSize,
5879  size_t minBlockCount,
5880  size_t maxBlockCount,
5881  VkDeviceSize bufferImageGranularity,
5882  uint32_t frameInUseCount,
5883  bool isCustomPool,
5884  bool explicitBlockSize,
5885  uint32_t algorithm);
5886  ~VmaBlockVector();
5887 
5888  VkResult CreateMinBlocks();
5889 
5890  VmaPool GetParentPool() const { return m_hParentPool; }
5891  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5892  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5893  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5894  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5895  uint32_t GetAlgorithm() const { return m_Algorithm; }
5896 
5897  void GetPoolStats(VmaPoolStats* pStats);
5898 
5899  bool IsEmpty() const { return m_Blocks.empty(); }
5900  bool IsCorruptionDetectionEnabled() const;
5901 
5902  VkResult Allocate(
5903  uint32_t currentFrameIndex,
5904  VkDeviceSize size,
5905  VkDeviceSize alignment,
5906  const VmaAllocationCreateInfo& createInfo,
5907  VmaSuballocationType suballocType,
5908  size_t allocationCount,
5909  VmaAllocation* pAllocations);
5910 
5911  void Free(
5912  VmaAllocation hAllocation);
5913 
5914  // Adds statistics of this BlockVector to pStats.
5915  void AddStats(VmaStats* pStats);
5916 
5917 #if VMA_STATS_STRING_ENABLED
5918  void PrintDetailedMap(class VmaJsonWriter& json);
5919 #endif
5920 
5921  void MakePoolAllocationsLost(
5922  uint32_t currentFrameIndex,
5923  size_t* pLostAllocationCount);
5924  VkResult CheckCorruption();
5925 
5926  // Saves results in pCtx->res.
5927  void Defragment(
5928  class VmaBlockVectorDefragmentationContext* pCtx,
5929  VmaDefragmentationStats* pStats,
5930  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5931  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5932  VkCommandBuffer commandBuffer);
5933  void DefragmentationEnd(
5934  class VmaBlockVectorDefragmentationContext* pCtx,
5935  VmaDefragmentationStats* pStats);
5936 
5937  ////////////////////////////////////////////////////////////////////////////////
5938  // To be used only while m_Mutex is locked. Used during defragmentation.
5939 
5940  size_t GetBlockCount() const { return m_Blocks.size(); }
5941  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5942  size_t CalcAllocationCount() const;
5943  bool IsBufferImageGranularityConflictPossible() const;
5944 
5945 private:
5946  friend class VmaDefragmentationAlgorithm_Generic;
5947 
5948  const VmaAllocator m_hAllocator;
5949  const VmaPool m_hParentPool;
5950  const uint32_t m_MemoryTypeIndex;
5951  const VkDeviceSize m_PreferredBlockSize;
5952  const size_t m_MinBlockCount;
5953  const size_t m_MaxBlockCount;
5954  const VkDeviceSize m_BufferImageGranularity;
5955  const uint32_t m_FrameInUseCount;
5956  const bool m_IsCustomPool;
5957  const bool m_ExplicitBlockSize;
5958  const uint32_t m_Algorithm;
5959  /* There can be at most one block that is completely empty - a
5960  hysteresis to avoid the pessimistic case of alternating creation and destruction
5961  of a VkDeviceMemory. */
5962  bool m_HasEmptyBlock;
5963  VMA_RW_MUTEX m_Mutex;
5964  // Incrementally sorted by sumFreeSize, ascending.
5965  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5966  uint32_t m_NextBlockId;
5967 
5968  VkDeviceSize CalcMaxBlockSize() const;
5969 
5970  // Finds and removes given block from vector.
5971  void Remove(VmaDeviceMemoryBlock* pBlock);
5972 
5973  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5974  // after this call.
5975  void IncrementallySortBlocks();
5976 
5977  VkResult AllocatePage(
5978  uint32_t currentFrameIndex,
5979  VkDeviceSize size,
5980  VkDeviceSize alignment,
5981  const VmaAllocationCreateInfo& createInfo,
5982  VmaSuballocationType suballocType,
5983  VmaAllocation* pAllocation);
5984 
5985  // To be used only without CAN_MAKE_OTHER_LOST flag.
5986  VkResult AllocateFromBlock(
5987  VmaDeviceMemoryBlock* pBlock,
5988  uint32_t currentFrameIndex,
5989  VkDeviceSize size,
5990  VkDeviceSize alignment,
5991  VmaAllocationCreateFlags allocFlags,
5992  void* pUserData,
5993  VmaSuballocationType suballocType,
5994  uint32_t strategy,
5995  VmaAllocation* pAllocation);
5996 
5997  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5998 
5999  // Saves result to pCtx->res.
6000  void ApplyDefragmentationMovesCpu(
6001  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6002  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6003  // Saves result to pCtx->res.
6004  void ApplyDefragmentationMovesGpu(
6005  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6006  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6007  VkCommandBuffer commandBuffer);
6008 
6009  /*
6010  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6011  - updated with new data.
6012  */
6013  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6014 };
6015 
6016 struct VmaPool_T
6017 {
6018  VMA_CLASS_NO_COPY(VmaPool_T)
6019 public:
6020  VmaBlockVector m_BlockVector;
6021 
6022  VmaPool_T(
6023  VmaAllocator hAllocator,
6024  const VmaPoolCreateInfo& createInfo,
6025  VkDeviceSize preferredBlockSize);
6026  ~VmaPool_T();
6027 
6028  uint32_t GetId() const { return m_Id; }
6029  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6030 
6031 #if VMA_STATS_STRING_ENABLED
6032  //void PrintDetailedMap(class VmaStringBuilder& sb);
6033 #endif
6034 
6035 private:
6036  uint32_t m_Id;
6037 };
6038 
6039 /*
6040 Performs defragmentation:
6041 
6042 - Updates `pBlockVector->m_pMetadata`.
6043 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6044 - Does not move actual data, only returns requested moves as `moves`.
6045 */
6046 class VmaDefragmentationAlgorithm
6047 {
6048  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6049 public:
6050  VmaDefragmentationAlgorithm(
6051  VmaAllocator hAllocator,
6052  VmaBlockVector* pBlockVector,
6053  uint32_t currentFrameIndex) :
6054  m_hAllocator(hAllocator),
6055  m_pBlockVector(pBlockVector),
6056  m_CurrentFrameIndex(currentFrameIndex)
6057  {
6058  }
6059  virtual ~VmaDefragmentationAlgorithm()
6060  {
6061  }
6062 
6063  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6064  virtual void AddAll() = 0;
6065 
6066  virtual VkResult Defragment(
6067  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6068  VkDeviceSize maxBytesToMove,
6069  uint32_t maxAllocationsToMove) = 0;
6070 
6071  virtual VkDeviceSize GetBytesMoved() const = 0;
6072  virtual uint32_t GetAllocationsMoved() const = 0;
6073 
6074 protected:
6075  VmaAllocator const m_hAllocator;
6076  VmaBlockVector* const m_pBlockVector;
6077  const uint32_t m_CurrentFrameIndex;
6078 
6079  struct AllocationInfo
6080  {
6081  VmaAllocation m_hAllocation;
6082  VkBool32* m_pChanged;
6083 
6084  AllocationInfo() :
6085  m_hAllocation(VK_NULL_HANDLE),
6086  m_pChanged(VMA_NULL)
6087  {
6088  }
6089  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6090  m_hAllocation(hAlloc),
6091  m_pChanged(pChanged)
6092  {
6093  }
6094  };
6095 };
6096 
6097 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6098 {
6099  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6100 public:
6101  VmaDefragmentationAlgorithm_Generic(
6102  VmaAllocator hAllocator,
6103  VmaBlockVector* pBlockVector,
6104  uint32_t currentFrameIndex,
6105  bool overlappingMoveSupported);
6106  virtual ~VmaDefragmentationAlgorithm_Generic();
6107 
6108  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6109  virtual void AddAll() { m_AllAllocations = true; }
6110 
6111  virtual VkResult Defragment(
6112  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6113  VkDeviceSize maxBytesToMove,
6114  uint32_t maxAllocationsToMove);
6115 
6116  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6117  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6118 
6119 private:
6120  uint32_t m_AllocationCount;
6121  bool m_AllAllocations;
6122 
6123  VkDeviceSize m_BytesMoved;
6124  uint32_t m_AllocationsMoved;
6125 
6126  struct AllocationInfoSizeGreater
6127  {
6128  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6129  {
6130  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6131  }
6132  };
6133 
6134  struct AllocationInfoOffsetGreater
6135  {
6136  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6137  {
6138  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6139  }
6140  };
6141 
6142  struct BlockInfo
6143  {
6144  size_t m_OriginalBlockIndex;
6145  VmaDeviceMemoryBlock* m_pBlock;
6146  bool m_HasNonMovableAllocations;
6147  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6148 
6149  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6150  m_OriginalBlockIndex(SIZE_MAX),
6151  m_pBlock(VMA_NULL),
6152  m_HasNonMovableAllocations(true),
6153  m_Allocations(pAllocationCallbacks)
6154  {
6155  }
6156 
6157  void CalcHasNonMovableAllocations()
6158  {
6159  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6160  const size_t defragmentAllocCount = m_Allocations.size();
6161  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6162  }
6163 
6164  void SortAllocationsBySizeDescending()
6165  {
6166  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6167  }
6168 
6169  void SortAllocationsByOffsetDescending()
6170  {
6171  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6172  }
6173  };
6174 
6175  struct BlockPointerLess
6176  {
6177  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6178  {
6179  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6180  }
6181  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6182  {
6183  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6184  }
6185  };
6186 
6187  // 1. Blocks with some non-movable allocations go first.
6188  // 2. Blocks with smaller sumFreeSize go first.
6189  struct BlockInfoCompareMoveDestination
6190  {
6191  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6192  {
6193  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6194  {
6195  return true;
6196  }
6197  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6198  {
6199  return false;
6200  }
6201  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6202  {
6203  return true;
6204  }
6205  return false;
6206  }
6207  };
6208 
6209  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6210  BlockInfoVector m_Blocks;
6211 
6212  VkResult DefragmentRound(
6213  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6214  VkDeviceSize maxBytesToMove,
6215  uint32_t maxAllocationsToMove);
6216 
6217  size_t CalcBlocksWithNonMovableCount() const;
6218 
6219  static bool MoveMakesSense(
6220  size_t dstBlockIndex, VkDeviceSize dstOffset,
6221  size_t srcBlockIndex, VkDeviceSize srcOffset);
6222 };
6223 
6224 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6225 {
6226  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6227 public:
6228  VmaDefragmentationAlgorithm_Fast(
6229  VmaAllocator hAllocator,
6230  VmaBlockVector* pBlockVector,
6231  uint32_t currentFrameIndex,
6232  bool overlappingMoveSupported);
6233  virtual ~VmaDefragmentationAlgorithm_Fast();
6234 
6235  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6236  virtual void AddAll() { m_AllAllocations = true; }
6237 
6238  virtual VkResult Defragment(
6239  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6240  VkDeviceSize maxBytesToMove,
6241  uint32_t maxAllocationsToMove);
6242 
6243  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6244  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6245 
6246 private:
6247  struct BlockInfo
6248  {
6249  size_t origBlockIndex;
6250  };
6251 
6252  class FreeSpaceDatabase
6253  {
6254  public:
6255  FreeSpaceDatabase()
6256  {
6257  FreeSpace s = {};
6258  s.blockInfoIndex = SIZE_MAX;
6259  for(size_t i = 0; i < MAX_COUNT; ++i)
6260  {
6261  m_FreeSpaces[i] = s;
6262  }
6263  }
6264 
6265  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6266  {
6267  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6268  {
6269  return;
6270  }
6271 
6272  // Find the first invalid slot or, failing that, the smallest one (only slots smaller than the new range get replaced).
6273  size_t bestIndex = SIZE_MAX;
6274  for(size_t i = 0; i < MAX_COUNT; ++i)
6275  {
6276  // Empty structure.
6277  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6278  {
6279  bestIndex = i;
6280  break;
6281  }
6282  if(m_FreeSpaces[i].size < size &&
6283  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6284  {
6285  bestIndex = i;
6286  }
6287  }
6288 
6289  if(bestIndex != SIZE_MAX)
6290  {
6291  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6292  m_FreeSpaces[bestIndex].offset = offset;
6293  m_FreeSpaces[bestIndex].size = size;
6294  }
6295  }
6296 
6297  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6298  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6299  {
6300  size_t bestIndex = SIZE_MAX;
6301  VkDeviceSize bestFreeSpaceAfter = 0;
6302  for(size_t i = 0; i < MAX_COUNT; ++i)
6303  {
6304  // Structure is valid.
6305  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6306  {
6307  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6308  // Allocation fits into this structure.
6309  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6310  {
6311  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6312  (dstOffset + size);
6313  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6314  {
6315  bestIndex = i;
6316  bestFreeSpaceAfter = freeSpaceAfter;
6317  }
6318  }
6319  }
6320  }
6321 
6322  if(bestIndex != SIZE_MAX)
6323  {
6324  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6325  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6326 
6327  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6328  {
6329  // Leave this structure for remaining empty space.
6330  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6331  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6332  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6333  }
6334  else
6335  {
6336  // This structure becomes invalid.
6337  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6338  }
6339 
6340  return true;
6341  }
6342 
6343  return false;
6344  }
6345 
6346  private:
6347  static const size_t MAX_COUNT = 4;
6348 
6349  struct FreeSpace
6350  {
6351  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6352  VkDeviceSize offset;
6353  VkDeviceSize size;
6354  } m_FreeSpaces[MAX_COUNT];
6355  };
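// How the database above behaves (values below are hypothetical): Register()
// keeps up to MAX_COUNT of the largest free ranges seen, evicting the smallest
// slot when a larger range arrives; Fetch() does a worst-fit pick - among slots
// that can hold `size` at `alignment` it takes the one leaving the most space
// afterwards, then shrinks or invalidates that slot.
//
//   db.Register(/*blockInfoIndex*/0, /*offset*/0, /*size*/256);
//   size_t blockIndex; VkDeviceSize dstOffset;
//   db.Fetch(/*alignment*/16, /*size*/64, blockIndex, dstOffset);
//   // -> blockIndex == 0, dstOffset == 0; the slot shrinks to offset 64, size 192.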
6356 
6357  const bool m_OverlappingMoveSupported;
6358 
6359  uint32_t m_AllocationCount;
6360  bool m_AllAllocations;
6361 
6362  VkDeviceSize m_BytesMoved;
6363  uint32_t m_AllocationsMoved;
6364 
6365  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6366 
6367  void PreprocessMetadata();
6368  void PostprocessMetadata();
6369  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6370 };
6371 
6372 struct VmaBlockDefragmentationContext
6373 {
6374  enum BLOCK_FLAG
6375  {
6376  BLOCK_FLAG_USED = 0x00000001,
6377  };
6378  uint32_t flags;
6379  VkBuffer hBuffer;
6380 
6381  VmaBlockDefragmentationContext() :
6382  flags(0),
6383  hBuffer(VK_NULL_HANDLE)
6384  {
6385  }
6386 };
6387 
6388 class VmaBlockVectorDefragmentationContext
6389 {
6390  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6391 public:
6392  VkResult res;
6393  bool mutexLocked;
6394  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6395 
6396  VmaBlockVectorDefragmentationContext(
6397  VmaAllocator hAllocator,
6398  VmaPool hCustomPool, // Optional.
6399  VmaBlockVector* pBlockVector,
6400  uint32_t currFrameIndex,
6401  uint32_t flags);
6402  ~VmaBlockVectorDefragmentationContext();
6403 
6404  VmaPool GetCustomPool() const { return m_hCustomPool; }
6405  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6406  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6407 
6408  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6409  void AddAll() { m_AllAllocations = true; }
6410 
6411  void Begin(bool overlappingMoveSupported);
6412 
6413 private:
6414  const VmaAllocator m_hAllocator;
6415  // Null if not from custom pool.
6416  const VmaPool m_hCustomPool;
6417  // Redundant, stored for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6418  VmaBlockVector* const m_pBlockVector;
6419  const uint32_t m_CurrFrameIndex;
6420  const uint32_t m_AlgorithmFlags;
6421  // Owner of this object.
6422  VmaDefragmentationAlgorithm* m_pAlgorithm;
6423 
6424  struct AllocInfo
6425  {
6426  VmaAllocation hAlloc;
6427  VkBool32* pChanged;
6428  };
6429  // Used between constructor and Begin.
6430  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6431  bool m_AllAllocations;
6432 };
6433 
6434 struct VmaDefragmentationContext_T
6435 {
6436 private:
6437  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6438 public:
6439  VmaDefragmentationContext_T(
6440  VmaAllocator hAllocator,
6441  uint32_t currFrameIndex,
6442  uint32_t flags,
6443  VmaDefragmentationStats* pStats);
6444  ~VmaDefragmentationContext_T();
6445 
6446  void AddPools(uint32_t poolCount, VmaPool* pPools);
6447  void AddAllocations(
6448  uint32_t allocationCount,
6449  VmaAllocation* pAllocations,
6450  VkBool32* pAllocationsChanged);
6451 
6452  /*
6453  Returns:
6454  - `VK_SUCCESS` if succeeded and the object can be destroyed immediately.
6455  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6456  - Negative value if an error occurred and the object can be destroyed immediately.
6457  */
6458  VkResult Defragment(
6459  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6460  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6461  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6462 
6463 private:
6464  const VmaAllocator m_hAllocator;
6465  const uint32_t m_CurrFrameIndex;
6466  const uint32_t m_Flags;
6467  VmaDefragmentationStats* const m_pStats;
6468  // Owner of these objects.
6469  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6470  // Owner of these objects.
6471  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6472 };
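// Sketch of the control flow implied by Defragment()'s contract above
// (hypothetical caller code, not part of the library):
//
//   VkResult res = ctx->Defragment(cpuBytes, cpuAllocs, gpuBytes, gpuAllocs, cmdBuf, &stats);
//   if(res == VK_SUCCESS || res < 0)
//       vma_delete(hAllocator, ctx);   // finished or failed: destroy immediately
//   else // res == VK_NOT_READY
//       ;                              // keep ctx alive until vmaDefragmentationEnd()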
6473 
6474 #if VMA_RECORDING_ENABLED
6475 
6476 class VmaRecorder
6477 {
6478 public:
6479  VmaRecorder();
6480  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6481  void WriteConfiguration(
6482  const VkPhysicalDeviceProperties& devProps,
6483  const VkPhysicalDeviceMemoryProperties& memProps,
6484  bool dedicatedAllocationExtensionEnabled);
6485  ~VmaRecorder();
6486 
6487  void RecordCreateAllocator(uint32_t frameIndex);
6488  void RecordDestroyAllocator(uint32_t frameIndex);
6489  void RecordCreatePool(uint32_t frameIndex,
6490  const VmaPoolCreateInfo& createInfo,
6491  VmaPool pool);
6492  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6493  void RecordAllocateMemory(uint32_t frameIndex,
6494  const VkMemoryRequirements& vkMemReq,
6495  const VmaAllocationCreateInfo& createInfo,
6496  VmaAllocation allocation);
6497  void RecordAllocateMemoryPages(uint32_t frameIndex,
6498  const VkMemoryRequirements& vkMemReq,
6499  const VmaAllocationCreateInfo& createInfo,
6500  uint64_t allocationCount,
6501  const VmaAllocation* pAllocations);
6502  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6503  const VkMemoryRequirements& vkMemReq,
6504  bool requiresDedicatedAllocation,
6505  bool prefersDedicatedAllocation,
6506  const VmaAllocationCreateInfo& createInfo,
6507  VmaAllocation allocation);
6508  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6509  const VkMemoryRequirements& vkMemReq,
6510  bool requiresDedicatedAllocation,
6511  bool prefersDedicatedAllocation,
6512  const VmaAllocationCreateInfo& createInfo,
6513  VmaAllocation allocation);
6514  void RecordFreeMemory(uint32_t frameIndex,
6515  VmaAllocation allocation);
6516  void RecordFreeMemoryPages(uint32_t frameIndex,
6517  uint64_t allocationCount,
6518  const VmaAllocation* pAllocations);
6519  void RecordResizeAllocation(
6520  uint32_t frameIndex,
6521  VmaAllocation allocation,
6522  VkDeviceSize newSize);
6523  void RecordSetAllocationUserData(uint32_t frameIndex,
6524  VmaAllocation allocation,
6525  const void* pUserData);
6526  void RecordCreateLostAllocation(uint32_t frameIndex,
6527  VmaAllocation allocation);
6528  void RecordMapMemory(uint32_t frameIndex,
6529  VmaAllocation allocation);
6530  void RecordUnmapMemory(uint32_t frameIndex,
6531  VmaAllocation allocation);
6532  void RecordFlushAllocation(uint32_t frameIndex,
6533  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6534  void RecordInvalidateAllocation(uint32_t frameIndex,
6535  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6536  void RecordCreateBuffer(uint32_t frameIndex,
6537  const VkBufferCreateInfo& bufCreateInfo,
6538  const VmaAllocationCreateInfo& allocCreateInfo,
6539  VmaAllocation allocation);
6540  void RecordCreateImage(uint32_t frameIndex,
6541  const VkImageCreateInfo& imageCreateInfo,
6542  const VmaAllocationCreateInfo& allocCreateInfo,
6543  VmaAllocation allocation);
6544  void RecordDestroyBuffer(uint32_t frameIndex,
6545  VmaAllocation allocation);
6546  void RecordDestroyImage(uint32_t frameIndex,
6547  VmaAllocation allocation);
6548  void RecordTouchAllocation(uint32_t frameIndex,
6549  VmaAllocation allocation);
6550  void RecordGetAllocationInfo(uint32_t frameIndex,
6551  VmaAllocation allocation);
6552  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6553  VmaPool pool);
6554  void RecordDefragmentationBegin(uint32_t frameIndex,
6555  const VmaDefragmentationInfo2& info,
6556  VmaDefragmentationContext ctx);
6557  void RecordDefragmentationEnd(uint32_t frameIndex,
6558  VmaDefragmentationContext ctx);
6559 
6560 private:
6561  struct CallParams
6562  {
6563  uint32_t threadId;
6564  double time;
6565  };
6566 
6567  class UserDataString
6568  {
6569  public:
6570  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6571  const char* GetString() const { return m_Str; }
6572 
6573  private:
6574  char m_PtrStr[17];
6575  const char* m_Str;
6576  };
6577 
6578  bool m_UseMutex;
6579  VmaRecordFlags m_Flags;
6580  FILE* m_File;
6581  VMA_MUTEX m_FileMutex;
6582  int64_t m_Freq;
6583  int64_t m_StartCounter;
6584 
6585  void GetBasicParams(CallParams& outParams);
6586 
6587  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6588  template<typename T>
6589  void PrintPointerList(uint64_t count, const T* pItems)
6590  {
6591  if(count)
6592  {
6593  fprintf(m_File, "%p", pItems[0]);
6594  for(uint64_t i = 1; i < count; ++i)
6595  {
6596  fprintf(m_File, " %p", pItems[i]);
6597  }
6598  }
6599  }
6600 
6601  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6602  void Flush();
6603 };
6604 
6605 #endif // #if VMA_RECORDING_ENABLED
6606 
6607 /*
6608 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6609 */
6610 class VmaAllocationObjectAllocator
6611 {
6612  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6613 public:
6614  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6615 
6616  VmaAllocation Allocate();
6617  void Free(VmaAllocation hAlloc);
6618 
6619 private:
6620  VMA_MUTEX m_Mutex;
6621  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
6622 };
6623 
6624 // Main allocator object.
6625 struct VmaAllocator_T
6626 {
6627  VMA_CLASS_NO_COPY(VmaAllocator_T)
6628 public:
6629  bool m_UseMutex;
6630  bool m_UseKhrDedicatedAllocation;
6631  VkDevice m_hDevice;
6632  bool m_AllocationCallbacksSpecified;
6633  VkAllocationCallbacks m_AllocationCallbacks;
6634  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6635  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
6636 
6637  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
6638  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6639  VMA_MUTEX m_HeapSizeLimitMutex;
6640 
6641  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6642  VkPhysicalDeviceMemoryProperties m_MemProps;
6643 
6644  // Default pools.
6645  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6646 
6647  // Each vector is sorted by memory (handle value).
6648  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6649  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6650  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6651 
6652  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6653  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6654  ~VmaAllocator_T();
6655 
6656  const VkAllocationCallbacks* GetAllocationCallbacks() const
6657  {
6658  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6659  }
6660  const VmaVulkanFunctions& GetVulkanFunctions() const
6661  {
6662  return m_VulkanFunctions;
6663  }
6664 
6665  VkDeviceSize GetBufferImageGranularity() const
6666  {
6667  return VMA_MAX(
6668  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6669  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6670  }
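// bufferImageGranularity is the device limit that governs how close a linear
// resource (e.g. a buffer) may be placed to an optimally-tiled image within the
// same VkDeviceMemory; VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY only raises it
// artificially for debugging purposes.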
6671 
6672  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6673  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6674 
6675  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6676  {
6677  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6678  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6679  }
6680  // True when the given memory type is HOST_VISIBLE but not HOST_COHERENT.
6681  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6682  {
6683  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6684  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6685  }
6686  // Minimum alignment for all allocations in a specific memory type.
6687  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6688  {
6689  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6690  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6691  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6692  }
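// Rationale: the Vulkan spec requires ranges passed to vkFlushMappedMemoryRanges /
// vkInvalidateMappedMemoryRanges to be multiples of nonCoherentAtomSize, so
// allocations in non-coherent memory are aligned up front to keep whole-allocation
// flushes and invalidations valid.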
6693 
6694  bool IsIntegratedGpu() const
6695  {
6696  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6697  }
6698 
6699 #if VMA_RECORDING_ENABLED
6700  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6701 #endif
6702 
6703  void GetBufferMemoryRequirements(
6704  VkBuffer hBuffer,
6705  VkMemoryRequirements& memReq,
6706  bool& requiresDedicatedAllocation,
6707  bool& prefersDedicatedAllocation) const;
6708  void GetImageMemoryRequirements(
6709  VkImage hImage,
6710  VkMemoryRequirements& memReq,
6711  bool& requiresDedicatedAllocation,
6712  bool& prefersDedicatedAllocation) const;
6713 
6714  // Main allocation function.
6715  VkResult AllocateMemory(
6716  const VkMemoryRequirements& vkMemReq,
6717  bool requiresDedicatedAllocation,
6718  bool prefersDedicatedAllocation,
6719  VkBuffer dedicatedBuffer,
6720  VkImage dedicatedImage,
6721  const VmaAllocationCreateInfo& createInfo,
6722  VmaSuballocationType suballocType,
6723  size_t allocationCount,
6724  VmaAllocation* pAllocations);
6725 
6726  // Main deallocation function.
6727  void FreeMemory(
6728  size_t allocationCount,
6729  const VmaAllocation* pAllocations);
6730 
6731  VkResult ResizeAllocation(
6732  const VmaAllocation alloc,
6733  VkDeviceSize newSize);
6734 
6735  void CalculateStats(VmaStats* pStats);
6736 
6737 #if VMA_STATS_STRING_ENABLED
6738  void PrintDetailedMap(class VmaJsonWriter& json);
6739 #endif
6740 
6741  VkResult DefragmentationBegin(
6742  const VmaDefragmentationInfo2& info,
6743  VmaDefragmentationStats* pStats,
6744  VmaDefragmentationContext* pContext);
6745  VkResult DefragmentationEnd(
6746  VmaDefragmentationContext context);
6747 
6748  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6749  bool TouchAllocation(VmaAllocation hAllocation);
6750 
6751  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6752  void DestroyPool(VmaPool pool);
6753  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6754 
6755  void SetCurrentFrameIndex(uint32_t frameIndex);
6756  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6757 
6758  void MakePoolAllocationsLost(
6759  VmaPool hPool,
6760  size_t* pLostAllocationCount);
6761  VkResult CheckPoolCorruption(VmaPool hPool);
6762  VkResult CheckCorruption(uint32_t memoryTypeBits);
6763 
6764  void CreateLostAllocation(VmaAllocation* pAllocation);
6765 
6766  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6767  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6768 
6769  VkResult Map(VmaAllocation hAllocation, void** ppData);
6770  void Unmap(VmaAllocation hAllocation);
6771 
6772  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6773  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6774 
6775  void FlushOrInvalidateAllocation(
6776  VmaAllocation hAllocation,
6777  VkDeviceSize offset, VkDeviceSize size,
6778  VMA_CACHE_OPERATION op);
6779 
6780  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6781 
6782  /*
6783  Returns a bit mask of memory types that can support defragmentation on GPU,
6784  i.e. those that allow creation of the buffer required for copy operations.
6785  */
6786  uint32_t GetGpuDefragmentationMemoryTypeBits();
6787 
6788 private:
6789  VkDeviceSize m_PreferredLargeHeapBlockSize;
6790 
6791  VkPhysicalDevice m_PhysicalDevice;
6792  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6793  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
6794 
6795  VMA_RW_MUTEX m_PoolsMutex;
6796  // Protected by m_PoolsMutex. Sorted by pointer value.
6797  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6798  uint32_t m_NextPoolId;
6799 
6800  VmaVulkanFunctions m_VulkanFunctions;
6801 
6802 #if VMA_RECORDING_ENABLED
6803  VmaRecorder* m_pRecorder;
6804 #endif
6805 
6806  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6807 
6808  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6809 
6810  VkResult AllocateMemoryOfType(
6811  VkDeviceSize size,
6812  VkDeviceSize alignment,
6813  bool dedicatedAllocation,
6814  VkBuffer dedicatedBuffer,
6815  VkImage dedicatedImage,
6816  const VmaAllocationCreateInfo& createInfo,
6817  uint32_t memTypeIndex,
6818  VmaSuballocationType suballocType,
6819  size_t allocationCount,
6820  VmaAllocation* pAllocations);
6821 
6822  // Helper function only to be used inside AllocateDedicatedMemory.
6823  VkResult AllocateDedicatedMemoryPage(
6824  VkDeviceSize size,
6825  VmaSuballocationType suballocType,
6826  uint32_t memTypeIndex,
6827  const VkMemoryAllocateInfo& allocInfo,
6828  bool map,
6829  bool isUserDataString,
6830  void* pUserData,
6831  VmaAllocation* pAllocation);
6832 
6833  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6834  VkResult AllocateDedicatedMemory(
6835  VkDeviceSize size,
6836  VmaSuballocationType suballocType,
6837  uint32_t memTypeIndex,
6838  bool map,
6839  bool isUserDataString,
6840  void* pUserData,
6841  VkBuffer dedicatedBuffer,
6842  VkImage dedicatedImage,
6843  size_t allocationCount,
6844  VmaAllocation* pAllocations);
6845 
6846  // Frees the given allocation's memory as a dedicated allocation and unregisters it.
6847  void FreeDedicatedMemory(VmaAllocation allocation);
6848 
6849  /*
6850  Calculates and returns a bit mask of memory types that can support defragmentation
6851  on GPU, i.e. those that allow creation of the buffer required for copy operations.
6852  */
6853  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
6854 };
6855 
6856 ////////////////////////////////////////////////////////////////////////////////
6857 // Memory allocation #2 after VmaAllocator_T definition
6858 
6859 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6860 {
6861  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6862 }
6863 
6864 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6865 {
6866  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6867 }
6868 
6869 template<typename T>
6870 static T* VmaAllocate(VmaAllocator hAllocator)
6871 {
6872  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6873 }
6874 
6875 template<typename T>
6876 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6877 {
6878  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6879 }
6880 
6881 template<typename T>
6882 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6883 {
6884  if(ptr != VMA_NULL)
6885  {
6886  ptr->~T();
6887  VmaFree(hAllocator, ptr);
6888  }
6889 }
6890 
6891 template<typename T>
6892 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6893 {
6894  if(ptr != VMA_NULL)
6895  {
6896  for(size_t i = count; i--; )
6897  ptr[i].~T();
6898  VmaFree(hAllocator, ptr);
6899  }
6900 }
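// Note the asymmetry above: VmaAllocate()/VmaAllocateArray() return raw,
// uninitialized memory, while vma_delete()/vma_delete_array() also run
// destructors. A hypothetical pairing with placement-new (`MyType` is
// illustrative):
//
//   MyType* p = VmaAllocateArray<MyType>(hAllocator, 4); // raw memory only
//   for(size_t i = 0; i < 4; ++i) new(p + i) MyType();   // construct manually
//   vma_delete_array(hAllocator, p, 4);                  // ~MyType() x4, then free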
6901 
6902 ////////////////////////////////////////////////////////////////////////////////
6903 // VmaStringBuilder
6904 
6905 #if VMA_STATS_STRING_ENABLED
6906 
6907 class VmaStringBuilder
6908 {
6909 public:
6910  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6911  size_t GetLength() const { return m_Data.size(); }
6912  const char* GetData() const { return m_Data.data(); }
6913 
6914  void Add(char ch) { m_Data.push_back(ch); }
6915  void Add(const char* pStr);
6916  void AddNewLine() { Add('\n'); }
6917  void AddNumber(uint32_t num);
6918  void AddNumber(uint64_t num);
6919  void AddPointer(const void* ptr);
6920 
6921 private:
6922  VmaVector< char, VmaStlAllocator<char> > m_Data;
6923 };
6924 
6925 void VmaStringBuilder::Add(const char* pStr)
6926 {
6927  const size_t strLen = strlen(pStr);
6928  if(strLen > 0)
6929  {
6930  const size_t oldCount = m_Data.size();
6931  m_Data.resize(oldCount + strLen);
6932  memcpy(m_Data.data() + oldCount, pStr, strLen);
6933  }
6934 }
6935 
6936 void VmaStringBuilder::AddNumber(uint32_t num)
6937 {
6938  char buf[11];
6939  VmaUint32ToStr(buf, sizeof(buf), num);
6940  Add(buf);
6941 }
6942 
6943 void VmaStringBuilder::AddNumber(uint64_t num)
6944 {
6945  char buf[21];
6946  VmaUint64ToStr(buf, sizeof(buf), num);
6947  Add(buf);
6948 }
6949 
6950 void VmaStringBuilder::AddPointer(const void* ptr)
6951 {
6952  char buf[21];
6953  VmaPtrToStr(buf, sizeof(buf), ptr);
6954  Add(buf);
6955 }
6956 
6957 #endif // #if VMA_STATS_STRING_ENABLED
6958 
6959 ////////////////////////////////////////////////////////////////////////////////
6960 // VmaJsonWriter
6961 
6962 #if VMA_STATS_STRING_ENABLED
6963 
6964 class VmaJsonWriter
6965 {
6966  VMA_CLASS_NO_COPY(VmaJsonWriter)
6967 public:
6968  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6969  ~VmaJsonWriter();
6970 
6971  void BeginObject(bool singleLine = false);
6972  void EndObject();
6973 
6974  void BeginArray(bool singleLine = false);
6975  void EndArray();
6976 
6977  void WriteString(const char* pStr);
6978  void BeginString(const char* pStr = VMA_NULL);
6979  void ContinueString(const char* pStr);
6980  void ContinueString(uint32_t n);
6981  void ContinueString(uint64_t n);
6982  void ContinueString_Pointer(const void* ptr);
6983  void EndString(const char* pStr = VMA_NULL);
6984 
6985  void WriteNumber(uint32_t n);
6986  void WriteNumber(uint64_t n);
6987  void WriteBool(bool b);
6988  void WriteNull();
6989 
6990 private:
6991  static const char* const INDENT;
6992 
6993  enum COLLECTION_TYPE
6994  {
6995  COLLECTION_TYPE_OBJECT,
6996  COLLECTION_TYPE_ARRAY,
6997  };
6998  struct StackItem
6999  {
7000  COLLECTION_TYPE type;
7001  uint32_t valueCount;
7002  bool singleLineMode;
7003  };
7004 
7005  VmaStringBuilder& m_SB;
7006  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
7007  bool m_InsideString;
7008 
7009  void BeginValue(bool isString);
7010  void WriteIndent(bool oneLess = false);
7011 };
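// A small usage sketch (hypothetical caller; the key name is illustrative).
// Inside an object, keys and values must alternate, which BeginValue() asserts:
//
//   VmaStringBuilder sb(hAllocator);
//   VmaJsonWriter json(allocationCallbacks, sb);
//   json.BeginObject();
//   json.WriteString("Count");          // key
//   json.WriteNumber(uint32_t(42));     // value
//   json.EndObject();                   // sb now holds: {\n  "Count": 42\n}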
7012 
7013 const char* const VmaJsonWriter::INDENT = " ";
7014 
7015 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7016  m_SB(sb),
7017  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7018  m_InsideString(false)
7019 {
7020 }
7021 
7022 VmaJsonWriter::~VmaJsonWriter()
7023 {
7024  VMA_ASSERT(!m_InsideString);
7025  VMA_ASSERT(m_Stack.empty());
7026 }
7027 
7028 void VmaJsonWriter::BeginObject(bool singleLine)
7029 {
7030  VMA_ASSERT(!m_InsideString);
7031 
7032  BeginValue(false);
7033  m_SB.Add('{');
7034 
7035  StackItem item;
7036  item.type = COLLECTION_TYPE_OBJECT;
7037  item.valueCount = 0;
7038  item.singleLineMode = singleLine;
7039  m_Stack.push_back(item);
7040 }
7041 
7042 void VmaJsonWriter::EndObject()
7043 {
7044  VMA_ASSERT(!m_InsideString);
7045 
7046  WriteIndent(true);
7047  m_SB.Add('}');
7048 
7049  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7050  m_Stack.pop_back();
7051 }
7052 
7053 void VmaJsonWriter::BeginArray(bool singleLine)
7054 {
7055  VMA_ASSERT(!m_InsideString);
7056 
7057  BeginValue(false);
7058  m_SB.Add('[');
7059 
7060  StackItem item;
7061  item.type = COLLECTION_TYPE_ARRAY;
7062  item.valueCount = 0;
7063  item.singleLineMode = singleLine;
7064  m_Stack.push_back(item);
7065 }
7066 
7067 void VmaJsonWriter::EndArray()
7068 {
7069  VMA_ASSERT(!m_InsideString);
7070 
7071  WriteIndent(true);
7072  m_SB.Add(']');
7073 
7074  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7075  m_Stack.pop_back();
7076 }
7077 
7078 void VmaJsonWriter::WriteString(const char* pStr)
7079 {
7080  BeginString(pStr);
7081  EndString();
7082 }
7083 
7084 void VmaJsonWriter::BeginString(const char* pStr)
7085 {
7086  VMA_ASSERT(!m_InsideString);
7087 
7088  BeginValue(true);
7089  m_SB.Add('"');
7090  m_InsideString = true;
7091  if(pStr != VMA_NULL && pStr[0] != '\0')
7092  {
7093  ContinueString(pStr);
7094  }
7095 }
7096 
7097 void VmaJsonWriter::ContinueString(const char* pStr)
7098 {
7099  VMA_ASSERT(m_InsideString);
7100 
7101  const size_t strLen = strlen(pStr);
7102  for(size_t i = 0; i < strLen; ++i)
7103  {
7104  char ch = pStr[i];
7105  if(ch == '\\')
7106  {
7107  m_SB.Add("\\\\");
7108  }
7109  else if(ch == '"')
7110  {
7111  m_SB.Add("\\\"");
7112  }
7113  else if(ch >= 32)
7114  {
7115  m_SB.Add(ch);
7116  }
7117  else switch(ch)
7118  {
7119  case '\b':
7120  m_SB.Add("\\b");
7121  break;
7122  case '\f':
7123  m_SB.Add("\\f");
7124  break;
7125  case '\n':
7126  m_SB.Add("\\n");
7127  break;
7128  case '\r':
7129  m_SB.Add("\\r");
7130  break;
7131  case '\t':
7132  m_SB.Add("\\t");
7133  break;
7134  default:
7135  VMA_ASSERT(0 && "Character not currently supported.");
7136  break;
7137  }
7138  }
7139 }
7140 
7141 void VmaJsonWriter::ContinueString(uint32_t n)
7142 {
7143  VMA_ASSERT(m_InsideString);
7144  m_SB.AddNumber(n);
7145 }
7146 
7147 void VmaJsonWriter::ContinueString(uint64_t n)
7148 {
7149  VMA_ASSERT(m_InsideString);
7150  m_SB.AddNumber(n);
7151 }
7152 
7153 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7154 {
7155  VMA_ASSERT(m_InsideString);
7156  m_SB.AddPointer(ptr);
7157 }
7158 
7159 void VmaJsonWriter::EndString(const char* pStr)
7160 {
7161  VMA_ASSERT(m_InsideString);
7162  if(pStr != VMA_NULL && pStr[0] != '\0')
7163  {
7164  ContinueString(pStr);
7165  }
7166  m_SB.Add('"');
7167  m_InsideString = false;
7168 }
7169 
7170 void VmaJsonWriter::WriteNumber(uint32_t n)
7171 {
7172  VMA_ASSERT(!m_InsideString);
7173  BeginValue(false);
7174  m_SB.AddNumber(n);
7175 }
7176 
7177 void VmaJsonWriter::WriteNumber(uint64_t n)
7178 {
7179  VMA_ASSERT(!m_InsideString);
7180  BeginValue(false);
7181  m_SB.AddNumber(n);
7182 }
7183 
7184 void VmaJsonWriter::WriteBool(bool b)
7185 {
7186  VMA_ASSERT(!m_InsideString);
7187  BeginValue(false);
7188  m_SB.Add(b ? "true" : "false");
7189 }
7190 
7191 void VmaJsonWriter::WriteNull()
7192 {
7193  VMA_ASSERT(!m_InsideString);
7194  BeginValue(false);
7195  m_SB.Add("null");
7196 }
7197 
7198 void VmaJsonWriter::BeginValue(bool isString)
7199 {
7200  if(!m_Stack.empty())
7201  {
7202  StackItem& currItem = m_Stack.back();
7203  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7204  currItem.valueCount % 2 == 0)
7205  {
7206  VMA_ASSERT(isString);
7207  }
7208 
7209  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7210  currItem.valueCount % 2 != 0)
7211  {
7212  m_SB.Add(": ");
7213  }
7214  else if(currItem.valueCount > 0)
7215  {
7216  m_SB.Add(", ");
7217  WriteIndent();
7218  }
7219  else
7220  {
7221  WriteIndent();
7222  }
7223  ++currItem.valueCount;
7224  }
7225 }
7226 
7227 void VmaJsonWriter::WriteIndent(bool oneLess)
7228 {
7229  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7230  {
7231  m_SB.AddNewLine();
7232 
7233  size_t count = m_Stack.size();
7234  if(count > 0 && oneLess)
7235  {
7236  --count;
7237  }
7238  for(size_t i = 0; i < count; ++i)
7239  {
7240  m_SB.Add(INDENT);
7241  }
7242  }
7243 }
7244 
7245 #endif // #if VMA_STATS_STRING_ENABLED
7246 
7247 ////////////////////////////////////////////////////////////////////////////////
7248 
7249 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7250 {
7251  if(IsUserDataString())
7252  {
7253  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7254 
7255  FreeUserDataString(hAllocator);
7256 
7257  if(pUserData != VMA_NULL)
7258  {
7259  const char* const newStrSrc = (char*)pUserData;
7260  const size_t newStrLen = strlen(newStrSrc);
7261  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7262  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7263  m_pUserData = newStrDst;
7264  }
7265  }
7266  else
7267  {
7268  m_pUserData = pUserData;
7269  }
7270 }
7271 
7272 void VmaAllocation_T::ChangeBlockAllocation(
7273  VmaAllocator hAllocator,
7274  VmaDeviceMemoryBlock* block,
7275  VkDeviceSize offset)
7276 {
7277  VMA_ASSERT(block != VMA_NULL);
7278  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7279 
7280  // Move mapping reference counter from old block to new block.
7281  if(block != m_BlockAllocation.m_Block)
7282  {
7283  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7284  if(IsPersistentMap())
7285  ++mapRefCount;
7286  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7287  block->Map(hAllocator, mapRefCount, VMA_NULL);
7288  }
7289 
7290  m_BlockAllocation.m_Block = block;
7291  m_BlockAllocation.m_Offset = offset;
7292 }
7293 
7294 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7295 {
7296  VMA_ASSERT(newSize > 0);
7297  m_Size = newSize;
7298 }
7299 
7300 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7301 {
7302  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7303  m_BlockAllocation.m_Offset = newOffset;
7304 }
7305 
7306 VkDeviceSize VmaAllocation_T::GetOffset() const
7307 {
7308  switch(m_Type)
7309  {
7310  case ALLOCATION_TYPE_BLOCK:
7311  return m_BlockAllocation.m_Offset;
7312  case ALLOCATION_TYPE_DEDICATED:
7313  return 0;
7314  default:
7315  VMA_ASSERT(0);
7316  return 0;
7317  }
7318 }
7319 
7320 VkDeviceMemory VmaAllocation_T::GetMemory() const
7321 {
7322  switch(m_Type)
7323  {
7324  case ALLOCATION_TYPE_BLOCK:
7325  return m_BlockAllocation.m_Block->GetDeviceMemory();
7326  case ALLOCATION_TYPE_DEDICATED:
7327  return m_DedicatedAllocation.m_hMemory;
7328  default:
7329  VMA_ASSERT(0);
7330  return VK_NULL_HANDLE;
7331  }
7332 }
7333 
7334 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7335 {
7336  switch(m_Type)
7337  {
7338  case ALLOCATION_TYPE_BLOCK:
7339  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7340  case ALLOCATION_TYPE_DEDICATED:
7341  return m_DedicatedAllocation.m_MemoryTypeIndex;
7342  default:
7343  VMA_ASSERT(0);
7344  return UINT32_MAX;
7345  }
7346 }
7347 
7348 void* VmaAllocation_T::GetMappedData() const
7349 {
7350  switch(m_Type)
7351  {
7352  case ALLOCATION_TYPE_BLOCK:
7353  if(m_MapCount != 0)
7354  {
7355  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7356  VMA_ASSERT(pBlockData != VMA_NULL);
7357  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7358  }
7359  else
7360  {
7361  return VMA_NULL;
7362  }
7363  break;
7364  case ALLOCATION_TYPE_DEDICATED:
7365  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7366  return m_DedicatedAllocation.m_pMappedData;
7367  default:
7368  VMA_ASSERT(0);
7369  return VMA_NULL;
7370  }
7371 }
7372 
7373 bool VmaAllocation_T::CanBecomeLost() const
7374 {
7375  switch(m_Type)
7376  {
7377  case ALLOCATION_TYPE_BLOCK:
7378  return m_BlockAllocation.m_CanBecomeLost;
7379  case ALLOCATION_TYPE_DEDICATED:
7380  return false;
7381  default:
7382  VMA_ASSERT(0);
7383  return false;
7384  }
7385 }
7386 
7387 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7388 {
7389  VMA_ASSERT(CanBecomeLost());
7390 
7391  /*
7392  Warning: This is a carefully designed algorithm.
7393  Do not modify unless you really know what you're doing :)
7394  */
7395  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7396  for(;;)
7397  {
7398  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7399  {
7400  VMA_ASSERT(0);
7401  return false;
7402  }
7403  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7404  {
7405  return false;
7406  }
7407  else // Last use time earlier than current time.
7408  {
7409  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7410  {
7411  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7412  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7413  return true;
7414  }
7415  }
7416  }
7417 }
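// Illustrative consequence of the comparison above: with frameInUseCount == 1,
// an allocation last used in frame N can first become lost in frame N + 2,
// since MakeLost() refuses as long as lastUseFrameIndex + frameInUseCount >= currentFrameIndex.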
7418 
7419 #if VMA_STATS_STRING_ENABLED
7420 
7421 // These names correspond to values of enum VmaSuballocationType.
7422 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7423  "FREE",
7424  "UNKNOWN",
7425  "BUFFER",
7426  "IMAGE_UNKNOWN",
7427  "IMAGE_LINEAR",
7428  "IMAGE_OPTIMAL",
7429 };
7430 
7431 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7432 {
7433  json.WriteString("Type");
7434  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7435 
7436  json.WriteString("Size");
7437  json.WriteNumber(m_Size);
7438 
7439  if(m_pUserData != VMA_NULL)
7440  {
7441  json.WriteString("UserData");
7442  if(IsUserDataString())
7443  {
7444  json.WriteString((const char*)m_pUserData);
7445  }
7446  else
7447  {
7448  json.BeginString();
7449  json.ContinueString_Pointer(m_pUserData);
7450  json.EndString();
7451  }
7452  }
7453 
7454  json.WriteString("CreationFrameIndex");
7455  json.WriteNumber(m_CreationFrameIndex);
7456 
7457  json.WriteString("LastUseFrameIndex");
7458  json.WriteNumber(GetLastUseFrameIndex());
7459 
7460  if(m_BufferImageUsage != 0)
7461  {
7462  json.WriteString("Usage");
7463  json.WriteNumber(m_BufferImageUsage);
7464  }
7465 }
7466 
7467 #endif
7468 
7469 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7470 {
7471  VMA_ASSERT(IsUserDataString());
7472  if(m_pUserData != VMA_NULL)
7473  {
7474  char* const oldStr = (char*)m_pUserData;
7475  const size_t oldStrLen = strlen(oldStr);
7476  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7477  m_pUserData = VMA_NULL;
7478  }
7479 }
7480 
7481 void VmaAllocation_T::BlockAllocMap()
7482 {
7483  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7484 
7485  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7486  {
7487  ++m_MapCount;
7488  }
7489  else
7490  {
7491  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7492  }
7493 }
7494 
7495 void VmaAllocation_T::BlockAllocUnmap()
7496 {
7497  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7498 
7499  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7500  {
7501  --m_MapCount;
7502  }
7503  else
7504  {
7505  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7506  }
7507 }
7508 
7509 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7510 {
7511  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7512 
7513  if(m_MapCount != 0)
7514  {
7515  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7516  {
7517  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7518  *ppData = m_DedicatedAllocation.m_pMappedData;
7519  ++m_MapCount;
7520  return VK_SUCCESS;
7521  }
7522  else
7523  {
7524  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7525  return VK_ERROR_MEMORY_MAP_FAILED;
7526  }
7527  }
7528  else
7529  {
7530  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7531  hAllocator->m_hDevice,
7532  m_DedicatedAllocation.m_hMemory,
7533  0, // offset
7534  VK_WHOLE_SIZE,
7535  0, // flags
7536  ppData);
7537  if(result == VK_SUCCESS)
7538  {
7539  m_DedicatedAllocation.m_pMappedData = *ppData;
7540  m_MapCount = 1;
7541  }
7542  return result;
7543  }
7544 }
7545 
7546 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7547 {
7548  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7549 
7550  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7551  {
7552  --m_MapCount;
7553  if(m_MapCount == 0)
7554  {
7555  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7556  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7557  hAllocator->m_hDevice,
7558  m_DedicatedAllocation.m_hMemory);
7559  }
7560  }
7561  else
7562  {
7563  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7564  }
7565 }
7566 
7567 #if VMA_STATS_STRING_ENABLED
7568 
7569 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7570 {
7571  json.BeginObject();
7572 
7573  json.WriteString("Blocks");
7574  json.WriteNumber(stat.blockCount);
7575 
7576  json.WriteString("Allocations");
7577  json.WriteNumber(stat.allocationCount);
7578 
7579  json.WriteString("UnusedRanges");
7580  json.WriteNumber(stat.unusedRangeCount);
7581 
7582  json.WriteString("UsedBytes");
7583  json.WriteNumber(stat.usedBytes);
7584 
7585  json.WriteString("UnusedBytes");
7586  json.WriteNumber(stat.unusedBytes);
7587 
7588  if(stat.allocationCount > 1)
7589  {
7590  json.WriteString("AllocationSize");
7591  json.BeginObject(true);
7592  json.WriteString("Min");
7593  json.WriteNumber(stat.allocationSizeMin);
7594  json.WriteString("Avg");
7595  json.WriteNumber(stat.allocationSizeAvg);
7596  json.WriteString("Max");
7597  json.WriteNumber(stat.allocationSizeMax);
7598  json.EndObject();
7599  }
7600 
7601  if(stat.unusedRangeCount > 1)
7602  {
7603  json.WriteString("UnusedRangeSize");
7604  json.BeginObject(true);
7605  json.WriteString("Min");
7606  json.WriteNumber(stat.unusedRangeSizeMin);
7607  json.WriteString("Avg");
7608  json.WriteNumber(stat.unusedRangeSizeAvg);
7609  json.WriteString("Max");
7610  json.WriteNumber(stat.unusedRangeSizeMax);
7611  json.EndObject();
7612  }
7613 
7614  json.EndObject();
7615 }
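// For reference, the function above produces JSON of this shape (the numbers
// are illustrative, not real output):
//
//   { "Blocks": 2, "Allocations": 10, "UnusedRanges": 3,
//     "UsedBytes": 1048576, "UnusedBytes": 65536,
//     "AllocationSize": { "Min": 256, "Avg": 104857, "Max": 524288 },
//     "UnusedRangeSize": { "Min": 128, "Avg": 21845, "Max": 32768 } }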
7616 
7617 #endif // #if VMA_STATS_STRING_ENABLED
7618 
7619 struct VmaSuballocationItemSizeLess
7620 {
7621  bool operator()(
7622  const VmaSuballocationList::iterator lhs,
7623  const VmaSuballocationList::iterator rhs) const
7624  {
7625  return lhs->size < rhs->size;
7626  }
7627  bool operator()(
7628  const VmaSuballocationList::iterator lhs,
7629  VkDeviceSize rhsSize) const
7630  {
7631  return lhs->size < rhsSize;
7632  }
7633 };
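// The second overload above lets a plain VkDeviceSize act as the search key,
// e.g. in the VmaBinaryFindFirstNotLess() call inside CreateAllocationRequest()
// below, without constructing a dummy list iterator.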
7634 
7635 
7636 ////////////////////////////////////////////////////////////////////////////////
7637 // class VmaBlockMetadata
7638 
7639 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7640  m_Size(0),
7641  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7642 {
7643 }
7644 
7645 #if VMA_STATS_STRING_ENABLED
7646 
7647 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7648  VkDeviceSize unusedBytes,
7649  size_t allocationCount,
7650  size_t unusedRangeCount) const
7651 {
7652  json.BeginObject();
7653 
7654  json.WriteString("TotalBytes");
7655  json.WriteNumber(GetSize());
7656 
7657  json.WriteString("UnusedBytes");
7658  json.WriteNumber(unusedBytes);
7659 
7660  json.WriteString("Allocations");
7661  json.WriteNumber((uint64_t)allocationCount);
7662 
7663  json.WriteString("UnusedRanges");
7664  json.WriteNumber((uint64_t)unusedRangeCount);
7665 
7666  json.WriteString("Suballocations");
7667  json.BeginArray();
7668 }
7669 
7670 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7671  VkDeviceSize offset,
7672  VmaAllocation hAllocation) const
7673 {
7674  json.BeginObject(true);
7675 
7676  json.WriteString("Offset");
7677  json.WriteNumber(offset);
7678 
7679  hAllocation->PrintParameters(json);
7680 
7681  json.EndObject();
7682 }
7683 
7684 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7685  VkDeviceSize offset,
7686  VkDeviceSize size) const
7687 {
7688  json.BeginObject(true);
7689 
7690  json.WriteString("Offset");
7691  json.WriteNumber(offset);
7692 
7693  json.WriteString("Type");
7694  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7695 
7696  json.WriteString("Size");
7697  json.WriteNumber(size);
7698 
7699  json.EndObject();
7700 }
7701 
7702 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7703 {
7704  json.EndArray();
7705  json.EndObject();
7706 }
7707 
7708 #endif // #if VMA_STATS_STRING_ENABLED
7709 
7710 ////////////////////////////////////////////////////////////////////////////////
7711 // class VmaBlockMetadata_Generic
7712 
7713 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7714  VmaBlockMetadata(hAllocator),
7715  m_FreeCount(0),
7716  m_SumFreeSize(0),
7717  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7718  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7719 {
7720 }
7721 
7722 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7723 {
7724 }
7725 
7726 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7727 {
7728  VmaBlockMetadata::Init(size);
7729 
7730  m_FreeCount = 1;
7731  m_SumFreeSize = size;
7732 
7733  VmaSuballocation suballoc = {};
7734  suballoc.offset = 0;
7735  suballoc.size = size;
7736  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7737  suballoc.hAllocation = VK_NULL_HANDLE;
7738 
7739  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7740  m_Suballocations.push_back(suballoc);
7741  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7742  --suballocItem;
7743  m_FreeSuballocationsBySize.push_back(suballocItem);
7744 }
7745 
7746 bool VmaBlockMetadata_Generic::Validate() const
7747 {
7748  VMA_VALIDATE(!m_Suballocations.empty());
7749 
7750  // Expected offset of new suballocation as calculated from previous ones.
7751  VkDeviceSize calculatedOffset = 0;
7752  // Expected number of free suballocations as calculated from traversing their list.
7753  uint32_t calculatedFreeCount = 0;
7754  // Expected sum size of free suballocations as calculated from traversing their list.
7755  VkDeviceSize calculatedSumFreeSize = 0;
7756  // Expected number of free suballocations that should be registered in
7757  // m_FreeSuballocationsBySize calculated from traversing their list.
7758  size_t freeSuballocationsToRegister = 0;
7759  // True if previous visited suballocation was free.
7760  bool prevFree = false;
7761 
7762  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7763  suballocItem != m_Suballocations.cend();
7764  ++suballocItem)
7765  {
7766  const VmaSuballocation& subAlloc = *suballocItem;
7767 
7768  // Actual offset of this suballocation doesn't match expected one.
7769  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7770 
7771  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7772  // Two adjacent free suballocations are invalid. They should be merged.
7773  VMA_VALIDATE(!prevFree || !currFree);
7774 
7775  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7776 
7777  if(currFree)
7778  {
7779  calculatedSumFreeSize += subAlloc.size;
7780  ++calculatedFreeCount;
7781  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7782  {
7783  ++freeSuballocationsToRegister;
7784  }
7785 
7786  // Margin required between allocations - every free space must be at least that large.
7787  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7788  }
7789  else
7790  {
7791  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7792  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7793 
7794  // Margin required between allocations - previous allocation must be free.
7795  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7796  }
7797 
7798  calculatedOffset += subAlloc.size;
7799  prevFree = currFree;
7800  }
7801 
7802  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7803  // match expected one.
7804  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7805 
7806  VkDeviceSize lastSize = 0;
7807  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7808  {
7809  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7810 
7811  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7812  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7813  // They must be sorted by size ascending.
7814  VMA_VALIDATE(suballocItem->size >= lastSize);
7815 
7816  lastSize = suballocItem->size;
7817  }
7818 
7819  // Check if totals match calculated values.
7820  VMA_VALIDATE(ValidateFreeSuballocationList());
7821  VMA_VALIDATE(calculatedOffset == GetSize());
7822  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7823  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7824 
7825  return true;
7826 }
7827 
7828 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7829 {
7830  if(!m_FreeSuballocationsBySize.empty())
7831  {
7832  return m_FreeSuballocationsBySize.back()->size;
7833  }
7834  else
7835  {
7836  return 0;
7837  }
7838 }
7839 
7840 bool VmaBlockMetadata_Generic::IsEmpty() const
7841 {
7842  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7843 }
7844 
7845 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7846 {
7847  outInfo.blockCount = 1;
7848 
7849  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7850  outInfo.allocationCount = rangeCount - m_FreeCount;
7851  outInfo.unusedRangeCount = m_FreeCount;
7852 
7853  outInfo.unusedBytes = m_SumFreeSize;
7854  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7855 
7856  outInfo.allocationSizeMin = UINT64_MAX;
7857  outInfo.allocationSizeMax = 0;
7858  outInfo.unusedRangeSizeMin = UINT64_MAX;
7859  outInfo.unusedRangeSizeMax = 0;
7860 
7861  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7862  suballocItem != m_Suballocations.cend();
7863  ++suballocItem)
7864  {
7865  const VmaSuballocation& suballoc = *suballocItem;
7866  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7867  {
7868  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7869  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7870  }
7871  else
7872  {
7873  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7874  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7875  }
7876  }
7877 }
7878 
7879 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7880 {
7881  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7882 
7883  inoutStats.size += GetSize();
7884  inoutStats.unusedSize += m_SumFreeSize;
7885  inoutStats.allocationCount += rangeCount - m_FreeCount;
7886  inoutStats.unusedRangeCount += m_FreeCount;
7887  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7888 }
7889 
7890 #if VMA_STATS_STRING_ENABLED
7891 
7892 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7893 {
7894  PrintDetailedMap_Begin(json,
7895  m_SumFreeSize, // unusedBytes
7896  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7897  m_FreeCount); // unusedRangeCount
7898 
7899  size_t i = 0;
7900  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7901  suballocItem != m_Suballocations.cend();
7902  ++suballocItem, ++i)
7903  {
7904  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7905  {
7906  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7907  }
7908  else
7909  {
7910  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7911  }
7912  }
7913 
7914  PrintDetailedMap_End(json);
7915 }
7916 
7917 #endif // #if VMA_STATS_STRING_ENABLED
7918 
7919 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7920  uint32_t currentFrameIndex,
7921  uint32_t frameInUseCount,
7922  VkDeviceSize bufferImageGranularity,
7923  VkDeviceSize allocSize,
7924  VkDeviceSize allocAlignment,
7925  bool upperAddress,
7926  VmaSuballocationType allocType,
7927  bool canMakeOtherLost,
7928  uint32_t strategy,
7929  VmaAllocationRequest* pAllocationRequest)
7930 {
7931  VMA_ASSERT(allocSize > 0);
7932  VMA_ASSERT(!upperAddress);
7933  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7934  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7935  VMA_HEAVY_ASSERT(Validate());
7936 
7937  pAllocationRequest->type = VmaAllocationRequestType::Normal;
7938 
7939  // There is not enough total free space in this block to fulfill the request: early return.
7940  if(canMakeOtherLost == false &&
7941  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7942  {
7943  return false;
7944  }
7945 
7946  // New algorithm, efficiently searching freeSuballocationsBySize.
7947  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7948  if(freeSuballocCount > 0)
7949  {
7950  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7951  {
7952  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7953  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7954  m_FreeSuballocationsBySize.data(),
7955  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7956  allocSize + 2 * VMA_DEBUG_MARGIN,
7957  VmaSuballocationItemSizeLess());
7958  size_t index = it - m_FreeSuballocationsBySize.data();
7959  for(; index < freeSuballocCount; ++index)
7960  {
7961  if(CheckAllocation(
7962  currentFrameIndex,
7963  frameInUseCount,
7964  bufferImageGranularity,
7965  allocSize,
7966  allocAlignment,
7967  allocType,
7968  m_FreeSuballocationsBySize[index],
7969  false, // canMakeOtherLost
7970  &pAllocationRequest->offset,
7971  &pAllocationRequest->itemsToMakeLostCount,
7972  &pAllocationRequest->sumFreeSize,
7973  &pAllocationRequest->sumItemSize))
7974  {
7975  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7976  return true;
7977  }
7978  }
7979  }
7980  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7981  {
7982  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7983  it != m_Suballocations.end();
7984  ++it)
7985  {
7986  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7987  currentFrameIndex,
7988  frameInUseCount,
7989  bufferImageGranularity,
7990  allocSize,
7991  allocAlignment,
7992  allocType,
7993  it,
7994  false, // canMakeOtherLost
7995  &pAllocationRequest->offset,
7996  &pAllocationRequest->itemsToMakeLostCount,
7997  &pAllocationRequest->sumFreeSize,
7998  &pAllocationRequest->sumItemSize))
7999  {
8000  pAllocationRequest->item = it;
8001  return true;
8002  }
8003  }
8004  }
8005  else // WORST_FIT, FIRST_FIT
8006  {
8007  // Search starting from the biggest suballocations.
8008  for(size_t index = freeSuballocCount; index--; )
8009  {
8010  if(CheckAllocation(
8011  currentFrameIndex,
8012  frameInUseCount,
8013  bufferImageGranularity,
8014  allocSize,
8015  allocAlignment,
8016  allocType,
8017  m_FreeSuballocationsBySize[index],
8018  false, // canMakeOtherLost
8019  &pAllocationRequest->offset,
8020  &pAllocationRequest->itemsToMakeLostCount,
8021  &pAllocationRequest->sumFreeSize,
8022  &pAllocationRequest->sumItemSize))
8023  {
8024  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8025  return true;
8026  }
8027  }
8028  }
8029  }
8030 
8031  if(canMakeOtherLost)
8032  {
8033  // Brute-force algorithm. TODO: Come up with something better.
8034 
8035  bool found = false;
8036  VmaAllocationRequest tmpAllocRequest = {};
8037  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8038  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8039  suballocIt != m_Suballocations.end();
8040  ++suballocIt)
8041  {
8042  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8043  suballocIt->hAllocation->CanBecomeLost())
8044  {
8045  if(CheckAllocation(
8046  currentFrameIndex,
8047  frameInUseCount,
8048  bufferImageGranularity,
8049  allocSize,
8050  allocAlignment,
8051  allocType,
8052  suballocIt,
8053  canMakeOtherLost,
8054  &tmpAllocRequest.offset,
8055  &tmpAllocRequest.itemsToMakeLostCount,
8056  &tmpAllocRequest.sumFreeSize,
8057  &tmpAllocRequest.sumItemSize))
8058  {
8059  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8060  {
8061  *pAllocationRequest = tmpAllocRequest;
8062  pAllocationRequest->item = suballocIt;
8063  break;
8064  }
8065  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8066  {
8067  *pAllocationRequest = tmpAllocRequest;
8068  pAllocationRequest->item = suballocIt;
8069  found = true;
8070  }
8071  }
8072  }
8073  }
8074 
8075  return found;
8076  }
8077 
8078  return false;
8079 }
8080 
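// Walks the chain starting at pAllocationRequest->item and marks the
// allocations counted in itemsToMakeLostCount as lost, freeing their
// suballocations so the previously computed request can actually be made.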
8081 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8082  uint32_t currentFrameIndex,
8083  uint32_t frameInUseCount,
8084  VmaAllocationRequest* pAllocationRequest)
8085 {
8086  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8087 
8088  while(pAllocationRequest->itemsToMakeLostCount > 0)
8089  {
8090  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8091  {
8092  ++pAllocationRequest->item;
8093  }
8094  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8095  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8096  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8097  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8098  {
8099  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8100  --pAllocationRequest->itemsToMakeLostCount;
8101  }
8102  else
8103  {
8104  return false;
8105  }
8106  }
8107 
8108  VMA_HEAVY_ASSERT(Validate());
8109  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8110  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8111 
8112  return true;
8113 }
8114 
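// Marks every allocation in this block that is currently allowed to become
// lost as lost, and returns the number of allocations freed this way.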
8115 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8116 {
8117  uint32_t lostAllocationCount = 0;
8118  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8119  it != m_Suballocations.end();
8120  ++it)
8121  {
8122  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8123  it->hAllocation->CanBecomeLost() &&
8124  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8125  {
8126  it = FreeSuballocation(it);
8127  ++lostAllocationCount;
8128  }
8129  }
8130  return lostAllocationCount;
8131 }
8132 
8133 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8134 {
8135  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8136  it != m_Suballocations.end();
8137  ++it)
8138  {
8139  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8140  {
8141  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8142  {
8143  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8144  return VK_ERROR_VALIDATION_FAILED_EXT;
8145  }
8146  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8147  {
8148  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8149  return VK_ERROR_VALIDATION_FAILED_EXT;
8150  }
8151  }
8152  }
8153 
8154  return VK_SUCCESS;
8155 }
8156 
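// Turns the free suballocation chosen in the request into a used one. Any
// leftover space before or after the allocation is re-inserted as new free
// suballocations so the free list and the totals stay consistent.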
8157 void VmaBlockMetadata_Generic::Alloc(
8158  const VmaAllocationRequest& request,
8159  VmaSuballocationType type,
8160  VkDeviceSize allocSize,
8161  VmaAllocation hAllocation)
8162 {
8163  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8164  VMA_ASSERT(request.item != m_Suballocations.end());
8165  VmaSuballocation& suballoc = *request.item;
8166  // Given suballocation is a free block.
8167  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8168  // Given offset is inside this suballocation.
8169  VMA_ASSERT(request.offset >= suballoc.offset);
8170  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8171  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8172  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8173 
8174  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8175  // it to become used.
8176  UnregisterFreeSuballocation(request.item);
8177 
8178  suballoc.offset = request.offset;
8179  suballoc.size = allocSize;
8180  suballoc.type = type;
8181  suballoc.hAllocation = hAllocation;
8182 
8183  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8184  if(paddingEnd)
8185  {
8186  VmaSuballocation paddingSuballoc = {};
8187  paddingSuballoc.offset = request.offset + allocSize;
8188  paddingSuballoc.size = paddingEnd;
8189  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8190  VmaSuballocationList::iterator next = request.item;
8191  ++next;
8192  const VmaSuballocationList::iterator paddingEndItem =
8193  m_Suballocations.insert(next, paddingSuballoc);
8194  RegisterFreeSuballocation(paddingEndItem);
8195  }
8196 
8197  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8198  if(paddingBegin)
8199  {
8200  VmaSuballocation paddingSuballoc = {};
8201  paddingSuballoc.offset = request.offset - paddingBegin;
8202  paddingSuballoc.size = paddingBegin;
8203  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8204  const VmaSuballocationList::iterator paddingBeginItem =
8205  m_Suballocations.insert(request.item, paddingSuballoc);
8206  RegisterFreeSuballocation(paddingBeginItem);
8207  }
8208 
8209  // Update totals.
8210  m_FreeCount = m_FreeCount - 1;
8211  if(paddingBegin > 0)
8212  {
8213  ++m_FreeCount;
8214  }
8215  if(paddingEnd > 0)
8216  {
8217  ++m_FreeCount;
8218  }
8219  m_SumFreeSize -= allocSize;
8220 }
8221 
8222 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8223 {
8224  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8225  suballocItem != m_Suballocations.end();
8226  ++suballocItem)
8227  {
8228  VmaSuballocation& suballoc = *suballocItem;
8229  if(suballoc.hAllocation == allocation)
8230  {
8231  FreeSuballocation(suballocItem);
8232  VMA_HEAVY_ASSERT(Validate());
8233  return;
8234  }
8235  }
8236  VMA_ASSERT(0 && "Not found!");
8237 }
8238 
8239 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8240 {
8241  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8242  suballocItem != m_Suballocations.end();
8243  ++suballocItem)
8244  {
8245  VmaSuballocation& suballoc = *suballocItem;
8246  if(suballoc.offset == offset)
8247  {
8248  FreeSuballocation(suballocItem);
8249  return;
8250  }
8251  }
8252  VMA_ASSERT(0 && "Not found!");
8253 }
8254 
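// Tries to resize an allocation in place. Shrinking always succeeds by giving
// the freed tail back to the free list; growing succeeds only when the next
// suballocation is free and large enough, including VMA_DEBUG_MARGIN.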
8255 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8256 {
8257  typedef VmaSuballocationList::iterator iter_type;
8258  for(iter_type suballocItem = m_Suballocations.begin();
8259  suballocItem != m_Suballocations.end();
8260  ++suballocItem)
8261  {
8262  VmaSuballocation& suballoc = *suballocItem;
8263  if(suballoc.hAllocation == alloc)
8264  {
8265  iter_type nextItem = suballocItem;
8266  ++nextItem;
8267 
8268  // Should have been ensured on higher level.
8269  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8270 
8271  // Shrinking.
8272  if(newSize < alloc->GetSize())
8273  {
8274  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8275 
8276  // There is next item.
8277  if(nextItem != m_Suballocations.end())
8278  {
8279  // Next item is free.
8280  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8281  {
8282  // Grow this next item backward.
8283  UnregisterFreeSuballocation(nextItem);
8284  nextItem->offset -= sizeDiff;
8285  nextItem->size += sizeDiff;
8286  RegisterFreeSuballocation(nextItem);
8287  }
8288  // Next item is not free.
8289  else
8290  {
8291  // Create free item after current one.
8292  VmaSuballocation newFreeSuballoc;
8293  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8294  newFreeSuballoc.offset = suballoc.offset + newSize;
8295  newFreeSuballoc.size = sizeDiff;
8296  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8297  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8298  RegisterFreeSuballocation(newFreeSuballocIt);
8299 
8300  ++m_FreeCount;
8301  }
8302  }
8303  // This is the last item.
8304  else
8305  {
8306  // Create free item at the end.
8307  VmaSuballocation newFreeSuballoc;
8308  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8309  newFreeSuballoc.offset = suballoc.offset + newSize;
8310  newFreeSuballoc.size = sizeDiff;
8311  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8312  m_Suballocations.push_back(newFreeSuballoc);
8313 
8314  iter_type newFreeSuballocIt = m_Suballocations.end();
8315  RegisterFreeSuballocation(--newFreeSuballocIt);
8316 
8317  ++m_FreeCount;
8318  }
8319 
8320  suballoc.size = newSize;
8321  m_SumFreeSize += sizeDiff;
8322  }
8323  // Growing.
8324  else
8325  {
8326  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8327 
8328  // There is next item.
8329  if(nextItem != m_Suballocations.end())
8330  {
8331  // Next item is free.
8332  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8333  {
8334  // There is not enough free space, including margin.
8335  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8336  {
8337  return false;
8338  }
8339 
8340  // There is more free space than required.
8341  if(nextItem->size > sizeDiff)
8342  {
8343  // Move and shrink this next item.
8344  UnregisterFreeSuballocation(nextItem);
8345  nextItem->offset += sizeDiff;
8346  nextItem->size -= sizeDiff;
8347  RegisterFreeSuballocation(nextItem);
8348  }
8349  // There is exactly the amount of free space required.
8350  else
8351  {
8352  // Remove this next free item.
8353  UnregisterFreeSuballocation(nextItem);
8354  m_Suballocations.erase(nextItem);
8355  --m_FreeCount;
8356  }
8357  }
8358  // Next item is not free - there is no space to grow.
8359  else
8360  {
8361  return false;
8362  }
8363  }
8364  // This is the last item - there is no space to grow.
8365  else
8366  {
8367  return false;
8368  }
8369 
8370  suballoc.size = newSize;
8371  m_SumFreeSize -= sizeDiff;
8372  }
8373 
8374  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
8375  return true;
8376  }
8377  }
8378  VMA_ASSERT(0 && "Not found!");
8379  return false;
8380 }
8381 
8382 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8383 {
8384  VkDeviceSize lastSize = 0;
8385  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8386  {
8387  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8388 
8389  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8390  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8391  VMA_VALIDATE(it->size >= lastSize);
8392  lastSize = it->size;
8393  }
8394  return true;
8395 }
8396 
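// Checks whether an allocation of allocSize/allocAlignment can be placed at
// suballocItem, honoring VMA_DEBUG_MARGIN and bufferImageGranularity. When
// canMakeOtherLost is true, lost-able neighbors may be counted into the
// request instead of failing immediately.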
8397 bool VmaBlockMetadata_Generic::CheckAllocation(
8398  uint32_t currentFrameIndex,
8399  uint32_t frameInUseCount,
8400  VkDeviceSize bufferImageGranularity,
8401  VkDeviceSize allocSize,
8402  VkDeviceSize allocAlignment,
8403  VmaSuballocationType allocType,
8404  VmaSuballocationList::const_iterator suballocItem,
8405  bool canMakeOtherLost,
8406  VkDeviceSize* pOffset,
8407  size_t* itemsToMakeLostCount,
8408  VkDeviceSize* pSumFreeSize,
8409  VkDeviceSize* pSumItemSize) const
8410 {
8411  VMA_ASSERT(allocSize > 0);
8412  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8413  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8414  VMA_ASSERT(pOffset != VMA_NULL);
8415 
8416  *itemsToMakeLostCount = 0;
8417  *pSumFreeSize = 0;
8418  *pSumItemSize = 0;
8419 
8420  if(canMakeOtherLost)
8421  {
8422  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8423  {
8424  *pSumFreeSize = suballocItem->size;
8425  }
8426  else
8427  {
8428  if(suballocItem->hAllocation->CanBecomeLost() &&
8429  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8430  {
8431  ++*itemsToMakeLostCount;
8432  *pSumItemSize = suballocItem->size;
8433  }
8434  else
8435  {
8436  return false;
8437  }
8438  }
8439 
8440  // Remaining size is too small for this request: Early return.
8441  if(GetSize() - suballocItem->offset < allocSize)
8442  {
8443  return false;
8444  }
8445 
8446  // Start from offset equal to beginning of this suballocation.
8447  *pOffset = suballocItem->offset;
8448 
8449  // Apply VMA_DEBUG_MARGIN at the beginning.
8450  if(VMA_DEBUG_MARGIN > 0)
8451  {
8452  *pOffset += VMA_DEBUG_MARGIN;
8453  }
8454 
8455  // Apply alignment.
8456  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8457 
8458  // Check previous suballocations for BufferImageGranularity conflicts.
8459  // Make bigger alignment if necessary.
8460  if(bufferImageGranularity > 1)
8461  {
8462  bool bufferImageGranularityConflict = false;
8463  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8464  while(prevSuballocItem != m_Suballocations.cbegin())
8465  {
8466  --prevSuballocItem;
8467  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8468  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8469  {
8470  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8471  {
8472  bufferImageGranularityConflict = true;
8473  break;
8474  }
8475  }
8476  else
8477  // Already on previous page.
8478  break;
8479  }
8480  if(bufferImageGranularityConflict)
8481  {
8482  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8483  }
8484  }
8485 
8486  // Now that we have final *pOffset, check if we are past suballocItem.
8487  // If yes, return false - this function should be called for another suballocItem as starting point.
8488  if(*pOffset >= suballocItem->offset + suballocItem->size)
8489  {
8490  return false;
8491  }
8492 
8493  // Calculate padding at the beginning based on current offset.
8494  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8495 
8496  // Calculate required margin at the end.
8497  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8498 
8499  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8500  // Another early return check.
8501  if(suballocItem->offset + totalSize > GetSize())
8502  {
8503  return false;
8504  }
8505 
8506  // Advance lastSuballocItem until desired size is reached.
8507  // Update itemsToMakeLostCount.
8508  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8509  if(totalSize > suballocItem->size)
8510  {
8511  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8512  while(remainingSize > 0)
8513  {
8514  ++lastSuballocItem;
8515  if(lastSuballocItem == m_Suballocations.cend())
8516  {
8517  return false;
8518  }
8519  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8520  {
8521  *pSumFreeSize += lastSuballocItem->size;
8522  }
8523  else
8524  {
8525  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8526  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8527  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8528  {
8529  ++*itemsToMakeLostCount;
8530  *pSumItemSize += lastSuballocItem->size;
8531  }
8532  else
8533  {
8534  return false;
8535  }
8536  }
8537  remainingSize = (lastSuballocItem->size < remainingSize) ?
8538  remainingSize - lastSuballocItem->size : 0;
8539  }
8540  }
8541 
8542  // Check next suballocations for BufferImageGranularity conflicts.
8543  // If conflict exists, we must mark more allocations lost or fail.
8544  if(bufferImageGranularity > 1)
8545  {
8546  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8547  ++nextSuballocItem;
8548  while(nextSuballocItem != m_Suballocations.cend())
8549  {
8550  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8551  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8552  {
8553  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8554  {
8555  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8556  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8557  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8558  {
8559  ++*itemsToMakeLostCount;
8560  }
8561  else
8562  {
8563  return false;
8564  }
8565  }
8566  }
8567  else
8568  {
8569  // Already on next page.
8570  break;
8571  }
8572  ++nextSuballocItem;
8573  }
8574  }
8575  }
8576  else
8577  {
8578  const VmaSuballocation& suballoc = *suballocItem;
8579  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8580 
8581  *pSumFreeSize = suballoc.size;
8582 
8583  // Size of this suballocation is too small for this request: Early return.
8584  if(suballoc.size < allocSize)
8585  {
8586  return false;
8587  }
8588 
8589  // Start from offset equal to beginning of this suballocation.
8590  *pOffset = suballoc.offset;
8591 
8592  // Apply VMA_DEBUG_MARGIN at the beginning.
8593  if(VMA_DEBUG_MARGIN > 0)
8594  {
8595  *pOffset += VMA_DEBUG_MARGIN;
8596  }
8597 
8598  // Apply alignment.
8599  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8600 
8601  // Check previous suballocations for BufferImageGranularity conflicts.
8602  // Make bigger alignment if necessary.
8603  if(bufferImageGranularity > 1)
8604  {
8605  bool bufferImageGranularityConflict = false;
8606  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8607  while(prevSuballocItem != m_Suballocations.cbegin())
8608  {
8609  --prevSuballocItem;
8610  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8611  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8612  {
8613  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8614  {
8615  bufferImageGranularityConflict = true;
8616  break;
8617  }
8618  }
8619  else
8620  // Already on previous page.
8621  break;
8622  }
8623  if(bufferImageGranularityConflict)
8624  {
8625  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8626  }
8627  }
8628 
8629  // Calculate padding at the beginning based on current offset.
8630  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8631 
8632  // Calculate required margin at the end.
8633  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8634 
8635  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8636  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8637  {
8638  return false;
8639  }
8640 
8641  // Check next suballocations for BufferImageGranularity conflicts.
8642  // If conflict exists, allocation cannot be made here.
8643  if(bufferImageGranularity > 1)
8644  {
8645  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8646  ++nextSuballocItem;
8647  while(nextSuballocItem != m_Suballocations.cend())
8648  {
8649  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8650  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8651  {
8652  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8653  {
8654  return false;
8655  }
8656  }
8657  else
8658  {
8659  // Already on next page.
8660  break;
8661  }
8662  ++nextSuballocItem;
8663  }
8664  }
8665  }
8666 
8667  // All tests passed: Success. pOffset is already filled.
8668  return true;
8669 }
8670 
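// Merges a free suballocation with the free item directly after it. Used by
// FreeSuballocation when coalescing neighboring free ranges.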
8671 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8672 {
8673  VMA_ASSERT(item != m_Suballocations.end());
8674  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8675 
8676  VmaSuballocationList::iterator nextItem = item;
8677  ++nextItem;
8678  VMA_ASSERT(nextItem != m_Suballocations.end());
8679  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8680 
8681  item->size += nextItem->size;
8682  --m_FreeCount;
8683  m_Suballocations.erase(nextItem);
8684 }
8685 
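// Marks the given suballocation as free and coalesces it with free neighbors,
// returning an iterator to the resulting, possibly merged, free item.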
8686 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8687 {
8688  // Change this suballocation to be marked as free.
8689  VmaSuballocation& suballoc = *suballocItem;
8690  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8691  suballoc.hAllocation = VK_NULL_HANDLE;
8692 
8693  // Update totals.
8694  ++m_FreeCount;
8695  m_SumFreeSize += suballoc.size;
8696 
8697  // Merge with previous and/or next suballocation if it's also free.
8698  bool mergeWithNext = false;
8699  bool mergeWithPrev = false;
8700 
8701  VmaSuballocationList::iterator nextItem = suballocItem;
8702  ++nextItem;
8703  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8704  {
8705  mergeWithNext = true;
8706  }
8707 
8708  VmaSuballocationList::iterator prevItem = suballocItem;
8709  if(suballocItem != m_Suballocations.begin())
8710  {
8711  --prevItem;
8712  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8713  {
8714  mergeWithPrev = true;
8715  }
8716  }
8717 
8718  if(mergeWithNext)
8719  {
8720  UnregisterFreeSuballocation(nextItem);
8721  MergeFreeWithNext(suballocItem);
8722  }
8723 
8724  if(mergeWithPrev)
8725  {
8726  UnregisterFreeSuballocation(prevItem);
8727  MergeFreeWithNext(prevItem);
8728  RegisterFreeSuballocation(prevItem);
8729  return prevItem;
8730  }
8731  else
8732  {
8733  RegisterFreeSuballocation(suballocItem);
8734  return suballocItem;
8735  }
8736 }
8737 
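// m_FreeSuballocationsBySize stores iterators to free items sorted by size so
// best-fit searches can binary-search it. Only items of at least
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are registered/unregistered.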
8738 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8739 {
8740  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8741  VMA_ASSERT(item->size > 0);
8742 
8743  // You may want to enable this validation at the beginning or at the end of
8744  // this function, depending on what you want to check.
8745  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8746 
8747  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8748  {
8749  if(m_FreeSuballocationsBySize.empty())
8750  {
8751  m_FreeSuballocationsBySize.push_back(item);
8752  }
8753  else
8754  {
8755  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8756  }
8757  }
8758 
8759  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8760 }
8761 
8762 
8763 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8764 {
8765  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8766  VMA_ASSERT(item->size > 0);
8767 
8768  // You may want to enable this validation at the beginning or at the end of
8769  // this function, depending on what you want to check.
8770  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8771 
8772  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8773  {
8774  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8775  m_FreeSuballocationsBySize.data(),
8776  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8777  item,
8778  VmaSuballocationItemSizeLess());
8779  for(size_t index = it - m_FreeSuballocationsBySize.data();
8780  index < m_FreeSuballocationsBySize.size();
8781  ++index)
8782  {
8783  if(m_FreeSuballocationsBySize[index] == item)
8784  {
8785  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8786  return;
8787  }
8788  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8789  }
8790  VMA_ASSERT(0 && "Not found.");
8791  }
8792 
8793  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8794 }
8795 
8796 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8797  VkDeviceSize bufferImageGranularity,
8798  VmaSuballocationType& inOutPrevSuballocType) const
8799 {
8800  if(bufferImageGranularity == 1 || IsEmpty())
8801  {
8802  return false;
8803  }
8804 
8805  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8806  bool typeConflictFound = false;
8807  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8808  it != m_Suballocations.cend();
8809  ++it)
8810  {
8811  const VmaSuballocationType suballocType = it->type;
8812  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8813  {
8814  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8815  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8816  {
8817  typeConflictFound = true;
8818  }
8819  inOutPrevSuballocType = suballocType;
8820  }
8821  }
8822 
8823  return typeConflictFound || minAlignment >= bufferImageGranularity;
8824 }
8825 
8826 ////////////////////////////////////////////////////////////////////////////////
8827 // class VmaBlockMetadata_Linear
8828 
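// Linear metadata keeps suballocations in two vectors: the 1st grows upward
// from the start of the block; the 2nd is either empty, a ring buffer wrapping
// around behind the 1st, or a stack growing down from the end (double stack).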
8829 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8830  VmaBlockMetadata(hAllocator),
8831  m_SumFreeSize(0),
8832  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8833  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8834  m_1stVectorIndex(0),
8835  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8836  m_1stNullItemsBeginCount(0),
8837  m_1stNullItemsMiddleCount(0),
8838  m_2ndNullItemsCount(0)
8839 {
8840 }
8841 
8842 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8843 {
8844 }
8845 
8846 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8847 {
8848  VmaBlockMetadata::Init(size);
8849  m_SumFreeSize = size;
8850 }
8851 
8852 bool VmaBlockMetadata_Linear::Validate() const
8853 {
8854  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8855  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8856 
8857  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8858  VMA_VALIDATE(!suballocations1st.empty() ||
8859  suballocations2nd.empty() ||
8860  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8861 
8862  if(!suballocations1st.empty())
8863  {
8864  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
8865  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8866  // Null item at the end should be just pop_back().
8867  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8868  }
8869  if(!suballocations2nd.empty())
8870  {
8871  // Null item at the end should be just pop_back().
8872  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8873  }
8874 
8875  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8876  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8877 
8878  VkDeviceSize sumUsedSize = 0;
8879  const size_t suballoc1stCount = suballocations1st.size();
8880  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8881 
8882  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8883  {
8884  const size_t suballoc2ndCount = suballocations2nd.size();
8885  size_t nullItem2ndCount = 0;
8886  for(size_t i = 0; i < suballoc2ndCount; ++i)
8887  {
8888  const VmaSuballocation& suballoc = suballocations2nd[i];
8889  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8890 
8891  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8892  VMA_VALIDATE(suballoc.offset >= offset);
8893 
8894  if(!currFree)
8895  {
8896  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8897  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8898  sumUsedSize += suballoc.size;
8899  }
8900  else
8901  {
8902  ++nullItem2ndCount;
8903  }
8904 
8905  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8906  }
8907 
8908  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8909  }
8910 
8911  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8912  {
8913  const VmaSuballocation& suballoc = suballocations1st[i];
8914  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8915  suballoc.hAllocation == VK_NULL_HANDLE);
8916  }
8917 
8918  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8919 
8920  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8921  {
8922  const VmaSuballocation& suballoc = suballocations1st[i];
8923  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8924 
8925  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8926  VMA_VALIDATE(suballoc.offset >= offset);
8927  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8928 
8929  if(!currFree)
8930  {
8931  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8932  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8933  sumUsedSize += suballoc.size;
8934  }
8935  else
8936  {
8937  ++nullItem1stCount;
8938  }
8939 
8940  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8941  }
8942  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8943 
8944  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8945  {
8946  const size_t suballoc2ndCount = suballocations2nd.size();
8947  size_t nullItem2ndCount = 0;
8948  for(size_t i = suballoc2ndCount; i--; )
8949  {
8950  const VmaSuballocation& suballoc = suballocations2nd[i];
8951  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8952 
8953  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8954  VMA_VALIDATE(suballoc.offset >= offset);
8955 
8956  if(!currFree)
8957  {
8958  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8959  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8960  sumUsedSize += suballoc.size;
8961  }
8962  else
8963  {
8964  ++nullItem2ndCount;
8965  }
8966 
8967  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8968  }
8969 
8970  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8971  }
8972 
8973  VMA_VALIDATE(offset <= GetSize());
8974  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8975 
8976  return true;
8977 }
8978 
8979 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8980 {
8981  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8982  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8983 }
8984 
8985 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8986 {
8987  const VkDeviceSize size = GetSize();
8988 
8989  /*
8990  We don't consider gaps inside allocation vectors with freed allocations because
8991  they are not suitable for reuse in a linear allocator. We consider only space that
8992  is available for new allocations.
8993  */
8994  if(IsEmpty())
8995  {
8996  return size;
8997  }
8998 
8999  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9000 
9001  switch(m_2ndVectorMode)
9002  {
9003  case SECOND_VECTOR_EMPTY:
9004  /*
9005  Available space is after end of 1st, as well as before beginning of 1st (which
9006  would make it a ring buffer).
9007  */
9008  {
9009  const size_t suballocations1stCount = suballocations1st.size();
9010  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9011  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9012  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9013  return VMA_MAX(
9014  firstSuballoc.offset,
9015  size - (lastSuballoc.offset + lastSuballoc.size));
9016  }
9017  break;
9018 
9019  case SECOND_VECTOR_RING_BUFFER:
9020  /*
9021  Available space is only between end of 2nd and beginning of 1st.
9022  */
9023  {
9024  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9025  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9026  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9027  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9028  }
9029  break;
9030 
9031  case SECOND_VECTOR_DOUBLE_STACK:
9032  /*
9033  Available space is only between end of 1st and top of 2nd.
9034  */
9035  {
9036  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9037  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9038  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9039  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9040  }
9041  break;
9042 
9043  default:
9044  VMA_ASSERT(0);
9045  return 0;
9046  }
9047 }
9048 
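// Walks the three possible regions in address order - 2nd vector as ring
// buffer, then the 1st vector, then the 2nd vector as double stack - and
// accumulates used bytes, unused ranges and min/max sizes into outInfo.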
9049 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9050 {
9051  const VkDeviceSize size = GetSize();
9052  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9053  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9054  const size_t suballoc1stCount = suballocations1st.size();
9055  const size_t suballoc2ndCount = suballocations2nd.size();
9056 
9057  outInfo.blockCount = 1;
9058  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9059  outInfo.unusedRangeCount = 0;
9060  outInfo.usedBytes = 0;
9061  outInfo.allocationSizeMin = UINT64_MAX;
9062  outInfo.allocationSizeMax = 0;
9063  outInfo.unusedRangeSizeMin = UINT64_MAX;
9064  outInfo.unusedRangeSizeMax = 0;
9065 
9066  VkDeviceSize lastOffset = 0;
9067 
9068  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9069  {
9070  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9071  size_t nextAlloc2ndIndex = 0;
9072  while(lastOffset < freeSpace2ndTo1stEnd)
9073  {
9074  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9075  while(nextAlloc2ndIndex < suballoc2ndCount &&
9076  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9077  {
9078  ++nextAlloc2ndIndex;
9079  }
9080 
9081  // Found non-null allocation.
9082  if(nextAlloc2ndIndex < suballoc2ndCount)
9083  {
9084  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9085 
9086  // 1. Process free space before this allocation.
9087  if(lastOffset < suballoc.offset)
9088  {
9089  // There is free space from lastOffset to suballoc.offset.
9090  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9091  ++outInfo.unusedRangeCount;
9092  outInfo.unusedBytes += unusedRangeSize;
9093  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9094  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9095  }
9096 
9097  // 2. Process this allocation.
9098  // There is allocation with suballoc.offset, suballoc.size.
9099  outInfo.usedBytes += suballoc.size;
9100  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9101  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9102 
9103  // 3. Prepare for next iteration.
9104  lastOffset = suballoc.offset + suballoc.size;
9105  ++nextAlloc2ndIndex;
9106  }
9107  // We are at the end.
9108  else
9109  {
9110  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9111  if(lastOffset < freeSpace2ndTo1stEnd)
9112  {
9113  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9114  ++outInfo.unusedRangeCount;
9115  outInfo.unusedBytes += unusedRangeSize;
9116  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9117  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9118  }
9119 
9120  // End of loop.
9121  lastOffset = freeSpace2ndTo1stEnd;
9122  }
9123  }
9124  }
9125 
9126  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9127  const VkDeviceSize freeSpace1stTo2ndEnd =
9128  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9129  while(lastOffset < freeSpace1stTo2ndEnd)
9130  {
9131  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9132  while(nextAlloc1stIndex < suballoc1stCount &&
9133  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9134  {
9135  ++nextAlloc1stIndex;
9136  }
9137 
9138  // Found non-null allocation.
9139  if(nextAlloc1stIndex < suballoc1stCount)
9140  {
9141  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9142 
9143  // 1. Process free space before this allocation.
9144  if(lastOffset < suballoc.offset)
9145  {
9146  // There is free space from lastOffset to suballoc.offset.
9147  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9148  ++outInfo.unusedRangeCount;
9149  outInfo.unusedBytes += unusedRangeSize;
9150  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9151  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9152  }
9153 
9154  // 2. Process this allocation.
9155  // There is allocation with suballoc.offset, suballoc.size.
9156  outInfo.usedBytes += suballoc.size;
9157  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9158  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9159 
9160  // 3. Prepare for next iteration.
9161  lastOffset = suballoc.offset + suballoc.size;
9162  ++nextAlloc1stIndex;
9163  }
9164  // We are at the end.
9165  else
9166  {
9167  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9168  if(lastOffset < freeSpace1stTo2ndEnd)
9169  {
9170  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9171  ++outInfo.unusedRangeCount;
9172  outInfo.unusedBytes += unusedRangeSize;
9173  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9174  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9175  }
9176 
9177  // End of loop.
9178  lastOffset = freeSpace1stTo2ndEnd;
9179  }
9180  }
9181 
9182  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9183  {
9184  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9185  while(lastOffset < size)
9186  {
9187  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9188  while(nextAlloc2ndIndex != SIZE_MAX &&
9189  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9190  {
9191  --nextAlloc2ndIndex;
9192  }
9193 
9194  // Found non-null allocation.
9195  if(nextAlloc2ndIndex != SIZE_MAX)
9196  {
9197  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9198 
9199  // 1. Process free space before this allocation.
9200  if(lastOffset < suballoc.offset)
9201  {
9202  // There is free space from lastOffset to suballoc.offset.
9203  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9204  ++outInfo.unusedRangeCount;
9205  outInfo.unusedBytes += unusedRangeSize;
9206  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9207  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9208  }
9209 
9210  // 2. Process this allocation.
9211  // There is allocation with suballoc.offset, suballoc.size.
9212  outInfo.usedBytes += suballoc.size;
9213  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9214  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9215 
9216  // 3. Prepare for next iteration.
9217  lastOffset = suballoc.offset + suballoc.size;
9218  --nextAlloc2ndIndex;
9219  }
9220  // We are at the end.
9221  else
9222  {
9223  // There is free space from lastOffset to size.
9224  if(lastOffset < size)
9225  {
9226  const VkDeviceSize unusedRangeSize = size - lastOffset;
9227  ++outInfo.unusedRangeCount;
9228  outInfo.unusedBytes += unusedRangeSize;
9229  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9230  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9231  }
9232 
9233  // End of loop.
9234  lastOffset = size;
9235  }
9236  }
9237  }
9238 
9239  outInfo.unusedBytes = size - outInfo.usedBytes;
9240 }
9241 
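// Same traversal as CalcAllocationStatInfo, but only accumulates pool-level
// counters: allocation count, unused size and the largest unused range.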
9242 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9243 {
9244  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9245  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9246  const VkDeviceSize size = GetSize();
9247  const size_t suballoc1stCount = suballocations1st.size();
9248  const size_t suballoc2ndCount = suballocations2nd.size();
9249 
9250  inoutStats.size += size;
9251 
9252  VkDeviceSize lastOffset = 0;
9253 
9254  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9255  {
9256  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9257  size_t nextAlloc2ndIndex = 0;
9258  while(lastOffset < freeSpace2ndTo1stEnd)
9259  {
9260  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9261  while(nextAlloc2ndIndex < suballoc2ndCount &&
9262  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9263  {
9264  ++nextAlloc2ndIndex;
9265  }
9266 
9267  // Found non-null allocation.
9268  if(nextAlloc2ndIndex < suballoc2ndCount)
9269  {
9270  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9271 
9272  // 1. Process free space before this allocation.
9273  if(lastOffset < suballoc.offset)
9274  {
9275  // There is free space from lastOffset to suballoc.offset.
9276  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9277  inoutStats.unusedSize += unusedRangeSize;
9278  ++inoutStats.unusedRangeCount;
9279  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9280  }
9281 
9282  // 2. Process this allocation.
9283  // There is allocation with suballoc.offset, suballoc.size.
9284  ++inoutStats.allocationCount;
9285 
9286  // 3. Prepare for next iteration.
9287  lastOffset = suballoc.offset + suballoc.size;
9288  ++nextAlloc2ndIndex;
9289  }
9290  // We are at the end.
9291  else
9292  {
9293  if(lastOffset < freeSpace2ndTo1stEnd)
9294  {
9295  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9296  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9297  inoutStats.unusedSize += unusedRangeSize;
9298  ++inoutStats.unusedRangeCount;
9299  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9300  }
9301 
9302  // End of loop.
9303  lastOffset = freeSpace2ndTo1stEnd;
9304  }
9305  }
9306  }
9307 
9308  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9309  const VkDeviceSize freeSpace1stTo2ndEnd =
9310  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9311  while(lastOffset < freeSpace1stTo2ndEnd)
9312  {
9313  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9314  while(nextAlloc1stIndex < suballoc1stCount &&
9315  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9316  {
9317  ++nextAlloc1stIndex;
9318  }
9319 
9320  // Found non-null allocation.
9321  if(nextAlloc1stIndex < suballoc1stCount)
9322  {
9323  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9324 
9325  // 1. Process free space before this allocation.
9326  if(lastOffset < suballoc.offset)
9327  {
9328  // There is free space from lastOffset to suballoc.offset.
9329  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9330  inoutStats.unusedSize += unusedRangeSize;
9331  ++inoutStats.unusedRangeCount;
9332  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9333  }
9334 
9335  // 2. Process this allocation.
9336  // There is allocation with suballoc.offset, suballoc.size.
9337  ++inoutStats.allocationCount;
9338 
9339  // 3. Prepare for next iteration.
9340  lastOffset = suballoc.offset + suballoc.size;
9341  ++nextAlloc1stIndex;
9342  }
9343  // We are at the end.
9344  else
9345  {
9346  if(lastOffset < freeSpace1stTo2ndEnd)
9347  {
9348  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9349  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9350  inoutStats.unusedSize += unusedRangeSize;
9351  ++inoutStats.unusedRangeCount;
9352  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9353  }
9354 
9355  // End of loop.
9356  lastOffset = freeSpace1stTo2ndEnd;
9357  }
9358  }
9359 
9360  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9361  {
9362  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9363  while(lastOffset < size)
9364  {
9365  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9366  while(nextAlloc2ndIndex != SIZE_MAX &&
9367  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9368  {
9369  --nextAlloc2ndIndex;
9370  }
9371 
9372  // Found non-null allocation.
9373  if(nextAlloc2ndIndex != SIZE_MAX)
9374  {
9375  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9376 
9377  // 1. Process free space before this allocation.
9378  if(lastOffset < suballoc.offset)
9379  {
9380  // There is free space from lastOffset to suballoc.offset.
9381  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9382  inoutStats.unusedSize += unusedRangeSize;
9383  ++inoutStats.unusedRangeCount;
9384  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9385  }
9386 
9387  // 2. Process this allocation.
9388  // There is allocation with suballoc.offset, suballoc.size.
9389  ++inoutStats.allocationCount;
9390 
9391  // 3. Prepare for next iteration.
9392  lastOffset = suballoc.offset + suballoc.size;
9393  --nextAlloc2ndIndex;
9394  }
9395  // We are at the end.
9396  else
9397  {
9398  if(lastOffset < size)
9399  {
9400  // There is free space from lastOffset to size.
9401  const VkDeviceSize unusedRangeSize = size - lastOffset;
9402  inoutStats.unusedSize += unusedRangeSize;
9403  ++inoutStats.unusedRangeCount;
9404  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9405  }
9406 
9407  // End of loop.
9408  lastOffset = size;
9409  }
9410  }
9411  }
9412 }
9413 
9414 #if VMA_STATS_STRING_ENABLED
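// The traversal is done twice: the first pass only counts allocations and
// unused ranges, which PrintDetailedMap_Begin needs up front; the second pass
// emits each allocation and unused range to the JSON writer.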
9415 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9416 {
9417  const VkDeviceSize size = GetSize();
9418  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9419  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9420  const size_t suballoc1stCount = suballocations1st.size();
9421  const size_t suballoc2ndCount = suballocations2nd.size();
9422 
9423  // FIRST PASS
9424 
9425  size_t unusedRangeCount = 0;
9426  VkDeviceSize usedBytes = 0;
9427 
9428  VkDeviceSize lastOffset = 0;
9429 
9430  size_t alloc2ndCount = 0;
9431  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9432  {
9433  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9434  size_t nextAlloc2ndIndex = 0;
9435  while(lastOffset < freeSpace2ndTo1stEnd)
9436  {
9437  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9438  while(nextAlloc2ndIndex < suballoc2ndCount &&
9439  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9440  {
9441  ++nextAlloc2ndIndex;
9442  }
9443 
9444  // Found non-null allocation.
9445  if(nextAlloc2ndIndex < suballoc2ndCount)
9446  {
9447  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9448 
9449  // 1. Process free space before this allocation.
9450  if(lastOffset < suballoc.offset)
9451  {
9452  // There is free space from lastOffset to suballoc.offset.
9453  ++unusedRangeCount;
9454  }
9455 
9456  // 2. Process this allocation.
9457  // There is allocation with suballoc.offset, suballoc.size.
9458  ++alloc2ndCount;
9459  usedBytes += suballoc.size;
9460 
9461  // 3. Prepare for next iteration.
9462  lastOffset = suballoc.offset + suballoc.size;
9463  ++nextAlloc2ndIndex;
9464  }
9465  // We are at the end.
9466  else
9467  {
9468  if(lastOffset < freeSpace2ndTo1stEnd)
9469  {
9470  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9471  ++unusedRangeCount;
9472  }
9473 
9474  // End of loop.
9475  lastOffset = freeSpace2ndTo1stEnd;
9476  }
9477  }
9478  }
9479 
9480  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9481  size_t alloc1stCount = 0;
9482  const VkDeviceSize freeSpace1stTo2ndEnd =
9483  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9484  while(lastOffset < freeSpace1stTo2ndEnd)
9485  {
9486  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9487  while(nextAlloc1stIndex < suballoc1stCount &&
9488  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9489  {
9490  ++nextAlloc1stIndex;
9491  }
9492 
9493  // Found non-null allocation.
9494  if(nextAlloc1stIndex < suballoc1stCount)
9495  {
9496  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9497 
9498  // 1. Process free space before this allocation.
9499  if(lastOffset < suballoc.offset)
9500  {
9501  // There is free space from lastOffset to suballoc.offset.
9502  ++unusedRangeCount;
9503  }
9504 
9505  // 2. Process this allocation.
9506  // There is allocation with suballoc.offset, suballoc.size.
9507  ++alloc1stCount;
9508  usedBytes += suballoc.size;
9509 
9510  // 3. Prepare for next iteration.
9511  lastOffset = suballoc.offset + suballoc.size;
9512  ++nextAlloc1stIndex;
9513  }
9514  // We are at the end.
9515  else
9516  {
9517  if(lastOffset < freeSpace1stTo2ndEnd)
9518  {
9519  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9520  ++unusedRangeCount;
9521  }
9522 
9523  // End of loop.
9524  lastOffset = freeSpace1stTo2ndEnd;
9525  }
9526  }
9527 
9528  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9529  {
9530  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9531  while(lastOffset < size)
9532  {
9533  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9534  while(nextAlloc2ndIndex != SIZE_MAX &&
9535  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9536  {
9537  --nextAlloc2ndIndex;
9538  }
9539 
9540  // Found non-null allocation.
9541  if(nextAlloc2ndIndex != SIZE_MAX)
9542  {
9543  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9544 
9545  // 1. Process free space before this allocation.
9546  if(lastOffset < suballoc.offset)
9547  {
9548  // There is free space from lastOffset to suballoc.offset.
9549  ++unusedRangeCount;
9550  }
9551 
9552  // 2. Process this allocation.
9553  // There is allocation with suballoc.offset, suballoc.size.
9554  ++alloc2ndCount;
9555  usedBytes += suballoc.size;
9556 
9557  // 3. Prepare for next iteration.
9558  lastOffset = suballoc.offset + suballoc.size;
9559  --nextAlloc2ndIndex;
9560  }
9561  // We are at the end.
9562  else
9563  {
9564  if(lastOffset < size)
9565  {
9566  // There is free space from lastOffset to size.
9567  ++unusedRangeCount;
9568  }
9569 
9570  // End of loop.
9571  lastOffset = size;
9572  }
9573  }
9574  }
9575 
9576  const VkDeviceSize unusedBytes = size - usedBytes;
9577  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9578 
9579  // SECOND PASS
9580  lastOffset = 0;
9581 
9582  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9583  {
9584  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9585  size_t nextAlloc2ndIndex = 0;
9586  while(lastOffset < freeSpace2ndTo1stEnd)
9587  {
9588  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9589  while(nextAlloc2ndIndex < suballoc2ndCount &&
9590  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9591  {
9592  ++nextAlloc2ndIndex;
9593  }
9594 
9595  // Found non-null allocation.
9596  if(nextAlloc2ndIndex < suballoc2ndCount)
9597  {
9598  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9599 
9600  // 1. Process free space before this allocation.
9601  if(lastOffset < suballoc.offset)
9602  {
9603  // There is free space from lastOffset to suballoc.offset.
9604  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9605  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9606  }
9607 
9608  // 2. Process this allocation.
9609  // There is allocation with suballoc.offset, suballoc.size.
9610  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9611 
9612  // 3. Prepare for next iteration.
9613  lastOffset = suballoc.offset + suballoc.size;
9614  ++nextAlloc2ndIndex;
9615  }
9616  // We are at the end.
9617  else
9618  {
9619  if(lastOffset < freeSpace2ndTo1stEnd)
9620  {
9621  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9622  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9623  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9624  }
9625 
9626  // End of loop.
9627  lastOffset = freeSpace2ndTo1stEnd;
9628  }
9629  }
9630  }
9631 
9632  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9633  while(lastOffset < freeSpace1stTo2ndEnd)
9634  {
9635  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9636  while(nextAlloc1stIndex < suballoc1stCount &&
9637  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9638  {
9639  ++nextAlloc1stIndex;
9640  }
9641 
9642  // Found non-null allocation.
9643  if(nextAlloc1stIndex < suballoc1stCount)
9644  {
9645  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9646 
9647  // 1. Process free space before this allocation.
9648  if(lastOffset < suballoc.offset)
9649  {
9650  // There is free space from lastOffset to suballoc.offset.
9651  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9652  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9653  }
9654 
9655  // 2. Process this allocation.
9656  // There is allocation with suballoc.offset, suballoc.size.
9657  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9658 
9659  // 3. Prepare for next iteration.
9660  lastOffset = suballoc.offset + suballoc.size;
9661  ++nextAlloc1stIndex;
9662  }
9663  // We are at the end.
9664  else
9665  {
9666  if(lastOffset < freeSpace1stTo2ndEnd)
9667  {
9668  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9669  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9670  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9671  }
9672 
9673  // End of loop.
9674  lastOffset = freeSpace1stTo2ndEnd;
9675  }
9676  }
9677 
9678  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9679  {
9680  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9681  while(lastOffset < size)
9682  {
9683  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9684  while(nextAlloc2ndIndex != SIZE_MAX &&
9685  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9686  {
9687  --nextAlloc2ndIndex;
9688  }
9689 
9690  // Found non-null allocation.
9691  if(nextAlloc2ndIndex != SIZE_MAX)
9692  {
9693  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9694 
9695  // 1. Process free space before this allocation.
9696  if(lastOffset < suballoc.offset)
9697  {
9698  // There is free space from lastOffset to suballoc.offset.
9699  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9700  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9701  }
9702 
9703  // 2. Process this allocation.
9704  // There is allocation with suballoc.offset, suballoc.size.
9705  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9706 
9707  // 3. Prepare for next iteration.
9708  lastOffset = suballoc.offset + suballoc.size;
9709  --nextAlloc2ndIndex;
9710  }
9711  // We are at the end.
9712  else
9713  {
9714  if(lastOffset < size)
9715  {
9716  // There is free space from lastOffset to size.
9717  const VkDeviceSize unusedRangeSize = size - lastOffset;
9718  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9719  }
9720 
9721  // End of loop.
9722  lastOffset = size;
9723  }
9724  }
9725  }
9726 
9727  PrintDetailedMap_End(json);
9728 }
9729 #endif // #if VMA_STATS_STRING_ENABLED
9730 
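// Dispatches to the upper-address path (double stack) or the default
// lower-address path, depending on the upperAddress flag of the request.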
9731 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9732  uint32_t currentFrameIndex,
9733  uint32_t frameInUseCount,
9734  VkDeviceSize bufferImageGranularity,
9735  VkDeviceSize allocSize,
9736  VkDeviceSize allocAlignment,
9737  bool upperAddress,
9738  VmaSuballocationType allocType,
9739  bool canMakeOtherLost,
9740  uint32_t strategy,
9741  VmaAllocationRequest* pAllocationRequest)
9742 {
9743  VMA_ASSERT(allocSize > 0);
9744  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9745  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9746  VMA_HEAVY_ASSERT(Validate());
9747  return upperAddress ?
9748  CreateAllocationRequest_UpperAddress(
9749  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9750  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9751  CreateAllocationRequest_LowerAddress(
9752  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9753  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9754 }
9755 
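// Upper-address requests implement the double stack: the allocation is placed
// just below 2nd.back() (or at the end of the block when 2nd is empty),
// aligned downward, and must leave VMA_DEBUG_MARGIN above the end of the 1st
// vector, with the usual bufferImageGranularity conflict checks.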
9756 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
9757  uint32_t currentFrameIndex,
9758  uint32_t frameInUseCount,
9759  VkDeviceSize bufferImageGranularity,
9760  VkDeviceSize allocSize,
9761  VkDeviceSize allocAlignment,
9762  VmaSuballocationType allocType,
9763  bool canMakeOtherLost,
9764  uint32_t strategy,
9765  VmaAllocationRequest* pAllocationRequest)
9766 {
9767  const VkDeviceSize size = GetSize();
9768  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9769  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9770 
9771  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9772  {
9773  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9774  return false;
9775  }
9776 
9777  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9778  if(allocSize > size)
9779  {
9780  return false;
9781  }
9782  VkDeviceSize resultBaseOffset = size - allocSize;
9783  if(!suballocations2nd.empty())
9784  {
9785  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9786  resultBaseOffset = lastSuballoc.offset - allocSize;
9787  if(allocSize > lastSuballoc.offset)
9788  {
9789  return false;
9790  }
9791  }
9792 
9793  // Start from offset equal to end of free space.
9794  VkDeviceSize resultOffset = resultBaseOffset;
9795 
9796  // Apply VMA_DEBUG_MARGIN at the end.
9797  if(VMA_DEBUG_MARGIN > 0)
9798  {
9799  if(resultOffset < VMA_DEBUG_MARGIN)
9800  {
9801  return false;
9802  }
9803  resultOffset -= VMA_DEBUG_MARGIN;
9804  }
9805 
9806  // Apply alignment.
9807  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9808 
9809  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9810  // Make bigger alignment if necessary.
9811  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9812  {
9813  bool bufferImageGranularityConflict = false;
9814  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9815  {
9816  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9817  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9818  {
9819  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9820  {
9821  bufferImageGranularityConflict = true;
9822  break;
9823  }
9824  }
9825  else
9826  // Already on previous page.
9827  break;
9828  }
9829  if(bufferImageGranularityConflict)
9830  {
9831  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9832  }
9833  }
9834 
9835  // There is enough free space.
9836  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9837  suballocations1st.back().offset + suballocations1st.back().size :
9838  0;
9839  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9840  {
9841  // Check previous suballocations for BufferImageGranularity conflicts.
9842  // If conflict exists, allocation cannot be made here.
9843  if(bufferImageGranularity > 1)
9844  {
9845  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9846  {
9847  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9848  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9849  {
9850  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9851  {
9852  return false;
9853  }
9854  }
9855  else
9856  {
9857  // Already on next page.
9858  break;
9859  }
9860  }
9861  }
9862 
9863  // All tests passed: Success.
9864  pAllocationRequest->offset = resultOffset;
9865  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9866  pAllocationRequest->sumItemSize = 0;
9867  // pAllocationRequest->item unused.
9868  pAllocationRequest->itemsToMakeLostCount = 0;
9869  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
9870  return true;
9871  }
9872 
9873  return false;
9874 }
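// For reference, a minimal restatement (not the library's exact templates) of
// the alignment helpers used above: VmaAlignDown/VmaAlignUp reduce to integer
// division, e.g. AlignDown(1000, 256) == 768 and AlignUp(1000, 256) == 1024.
static inline VkDeviceSize AlignDown(VkDeviceSize val, VkDeviceSize align)
{
    return (val / align) * align;
}
static inline VkDeviceSize AlignUp(VkDeviceSize val, VkDeviceSize align)
{
    return ((val + align - 1) / align) * align;
}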
9875 
9876 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
9877  uint32_t currentFrameIndex,
9878  uint32_t frameInUseCount,
9879  VkDeviceSize bufferImageGranularity,
9880  VkDeviceSize allocSize,
9881  VkDeviceSize allocAlignment,
9882  VmaSuballocationType allocType,
9883  bool canMakeOtherLost,
9884  uint32_t strategy,
9885  VmaAllocationRequest* pAllocationRequest)
9886 {
9887  const VkDeviceSize size = GetSize();
9888  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9889  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9890 
9891  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9892  {
9893  // Try to allocate at the end of 1st vector.
9894 
9895  VkDeviceSize resultBaseOffset = 0;
9896  if(!suballocations1st.empty())
9897  {
9898  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9899  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9900  }
9901 
9902  // Start from offset equal to beginning of free space.
9903  VkDeviceSize resultOffset = resultBaseOffset;
9904 
9905  // Apply VMA_DEBUG_MARGIN at the beginning.
9906  if(VMA_DEBUG_MARGIN > 0)
9907  {
9908  resultOffset += VMA_DEBUG_MARGIN;
9909  }
9910 
9911  // Apply alignment.
9912  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9913 
9914  // Check previous suballocations for BufferImageGranularity conflicts.
9915  // Make bigger alignment if necessary.
9916  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9917  {
9918  bool bufferImageGranularityConflict = false;
9919  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9920  {
9921  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9922  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9923  {
9924  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9925  {
9926  bufferImageGranularityConflict = true;
9927  break;
9928  }
9929  }
9930  else
9931  // Already on previous page.
9932  break;
9933  }
9934  if(bufferImageGranularityConflict)
9935  {
9936  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9937  }
9938  }
9939 
9940  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9941  suballocations2nd.back().offset : size;
9942 
9943  // There is enough free space at the end after alignment.
9944  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9945  {
9946  // Check next suballocations for BufferImageGranularity conflicts.
9947  // If conflict exists, allocation cannot be made here.
9948  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9949  {
9950  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9951  {
9952  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9953  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9954  {
9955  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9956  {
9957  return false;
9958  }
9959  }
9960  else
9961  {
9962  // Already on previous page.
9963  break;
9964  }
9965  }
9966  }
9967 
9968  // All tests passed: Success.
9969  pAllocationRequest->offset = resultOffset;
9970  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9971  pAllocationRequest->sumItemSize = 0;
9972  // pAllocationRequest->item, customData unused.
9973  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
9974  pAllocationRequest->itemsToMakeLostCount = 0;
9975  return true;
9976  }
9977  }
9978 
9979  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9980  // beginning of 1st vector as the end of free space.
9981  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9982  {
9983  VMA_ASSERT(!suballocations1st.empty());
9984 
9985  VkDeviceSize resultBaseOffset = 0;
9986  if(!suballocations2nd.empty())
9987  {
9988  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9989  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9990  }
9991 
9992  // Start from offset equal to beginning of free space.
9993  VkDeviceSize resultOffset = resultBaseOffset;
9994 
9995  // Apply VMA_DEBUG_MARGIN at the beginning.
9996  if(VMA_DEBUG_MARGIN > 0)
9997  {
9998  resultOffset += VMA_DEBUG_MARGIN;
9999  }
10000 
10001  // Apply alignment.
10002  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10003 
10004  // Check previous suballocations for BufferImageGranularity conflicts.
10005  // Make bigger alignment if necessary.
10006  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10007  {
10008  bool bufferImageGranularityConflict = false;
10009  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
10010  {
10011  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
10012  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10013  {
10014  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10015  {
10016  bufferImageGranularityConflict = true;
10017  break;
10018  }
10019  }
10020  else
10021  // Already on previous page.
10022  break;
10023  }
10024  if(bufferImageGranularityConflict)
10025  {
10026  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10027  }
10028  }
10029 
10030  pAllocationRequest->itemsToMakeLostCount = 0;
10031  pAllocationRequest->sumItemSize = 0;
10032  size_t index1st = m_1stNullItemsBeginCount;
10033 
10034  if(canMakeOtherLost)
10035  {
10036  while(index1st < suballocations1st.size() &&
10037  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10038  {
10039  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10040  const VmaSuballocation& suballoc = suballocations1st[index1st];
10041  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10042  {
10043  // No problem.
10044  }
10045  else
10046  {
10047  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10048  if(suballoc.hAllocation->CanBecomeLost() &&
10049  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10050  {
10051  ++pAllocationRequest->itemsToMakeLostCount;
10052  pAllocationRequest->sumItemSize += suballoc.size;
10053  }
10054  else
10055  {
10056  return false;
10057  }
10058  }
10059  ++index1st;
10060  }
10061 
10062  // Check next suballocations for BufferImageGranularity conflicts.
10063  // If conflict exists, we must mark more allocations lost or fail.
10064  if(bufferImageGranularity > 1)
10065  {
10066  while(index1st < suballocations1st.size())
10067  {
10068  const VmaSuballocation& suballoc = suballocations1st[index1st];
10069  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10070  {
10071  if(suballoc.hAllocation != VK_NULL_HANDLE)
10072  {
10073  // Conservative: not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10074  if(suballoc.hAllocation->CanBecomeLost() &&
10075  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10076  {
10077  ++pAllocationRequest->itemsToMakeLostCount;
10078  pAllocationRequest->sumItemSize += suballoc.size;
10079  }
10080  else
10081  {
10082  return false;
10083  }
10084  }
10085  }
10086  else
10087  {
10088  // Already on next page.
10089  break;
10090  }
10091  ++index1st;
10092  }
10093  }
10094 
10095  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
10096  if(index1st == suballocations1st.size() &&
10097  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10098  {
10099  // TODO: Known limitation: this case is not implemented yet, so the allocation fails.
10100  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10101  }
10102  }
10103 
10104  // There is enough free space at the end after alignment.
10105  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10106  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10107  {
10108  // Check next suballocations for BufferImageGranularity conflicts.
10109  // If conflict exists, allocation cannot be made here.
10110  if(bufferImageGranularity > 1)
10111  {
10112  for(size_t nextSuballocIndex = index1st;
10113  nextSuballocIndex < suballocations1st.size();
10114  nextSuballocIndex++)
10115  {
10116  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10117  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10118  {
10119  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10120  {
10121  return false;
10122  }
10123  }
10124  else
10125  {
10126  // Already on next page.
10127  break;
10128  }
10129  }
10130  }
10131 
10132  // All tests passed: Success.
10133  pAllocationRequest->offset = resultOffset;
10134  pAllocationRequest->sumFreeSize =
10135  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10136  - resultBaseOffset
10137  - pAllocationRequest->sumItemSize;
10138  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10139  // pAllocationRequest->item, customData unused.
10140  return true;
10141  }
10142  }
10143 
10144  return false;
10145 }
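// Worked example (illustrative) for the end-of-1st path above: if the 1st
// vector ends at offset 256, VMA_DEBUG_MARGIN is 16 and allocAlignment is 64,
// then resultOffset becomes VmaAlignUp(256 + 16, 64) == 320. A 128-byte request
// then succeeds only if 320 + 128 + 16 == 464 <= freeSpaceEnd, where
// freeSpaceEnd is the lowest offset of the 2nd stack in double-stack mode, or
// the block size otherwise.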
10146 
10147 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10148  uint32_t currentFrameIndex,
10149  uint32_t frameInUseCount,
10150  VmaAllocationRequest* pAllocationRequest)
10151 {
10152  if(pAllocationRequest->itemsToMakeLostCount == 0)
10153  {
10154  return true;
10155  }
10156 
10157  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10158 
10159  // We always start from 1st.
10160  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10161  size_t index = m_1stNullItemsBeginCount;
10162  size_t madeLostCount = 0;
10163  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10164  {
10165  if(index == suballocations->size())
10166  {
10167  index = 0;
10168  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
10169  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10170  {
10171  suballocations = &AccessSuballocations2nd();
10172  }
10173  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10174  // suballocations continues pointing at AccessSuballocations1st().
10175  VMA_ASSERT(!suballocations->empty());
10176  }
10177  VmaSuballocation& suballoc = (*suballocations)[index];
10178  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10179  {
10180  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10181  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10182  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10183  {
10184  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10185  suballoc.hAllocation = VK_NULL_HANDLE;
10186  m_SumFreeSize += suballoc.size;
10187  if(suballocations == &AccessSuballocations1st())
10188  {
10189  ++m_1stNullItemsMiddleCount;
10190  }
10191  else
10192  {
10193  ++m_2ndNullItemsCount;
10194  }
10195  ++madeLostCount;
10196  }
10197  else
10198  {
10199  return false;
10200  }
10201  }
10202  ++index;
10203  }
10204 
10205  CleanupAfterFree();
10206  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10207 
10208  return true;
10209 }
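// Usage sketch (illustrative): the lost-allocation machinery above serves
// allocations created with CAN_BECOME_LOST. Such an allocation may be reclaimed
// once its last-use frame index is more than frameInUseCount frames old; the
// caller detects this with vmaTouchAllocation(). `allocator` and `alloc` are
// assumed to exist, with `alloc` created using
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
static void TouchOrRecreate(VmaAllocator allocator, VmaAllocation alloc,
    uint32_t frameIndex)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // The allocation was made lost and its memory reused - the resource
        // bound to it must be recreated before further use.
    }
}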
10210 
10211 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10212 {
10213  uint32_t lostAllocationCount = 0;
10214 
10215  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10216  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10217  {
10218  VmaSuballocation& suballoc = suballocations1st[i];
10219  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10220  suballoc.hAllocation->CanBecomeLost() &&
10221  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10222  {
10223  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10224  suballoc.hAllocation = VK_NULL_HANDLE;
10225  ++m_1stNullItemsMiddleCount;
10226  m_SumFreeSize += suballoc.size;
10227  ++lostAllocationCount;
10228  }
10229  }
10230 
10231  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10232  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10233  {
10234  VmaSuballocation& suballoc = suballocations2nd[i];
10235  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10236  suballoc.hAllocation->CanBecomeLost() &&
10237  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10238  {
10239  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10240  suballoc.hAllocation = VK_NULL_HANDLE;
10241  ++m_2ndNullItemsCount;
10242  m_SumFreeSize += suballoc.size;
10243  ++lostAllocationCount;
10244  }
10245  }
10246 
10247  if(lostAllocationCount)
10248  {
10249  CleanupAfterFree();
10250  }
10251 
10252  return lostAllocationCount;
10253 }
10254 
10255 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10256 {
10257  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10258  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10259  {
10260  const VmaSuballocation& suballoc = suballocations1st[i];
10261  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10262  {
10263  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10264  {
10265  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10266  return VK_ERROR_VALIDATION_FAILED_EXT;
10267  }
10268  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10269  {
10270  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10271  return VK_ERROR_VALIDATION_FAILED_EXT;
10272  }
10273  }
10274  }
10275 
10276  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10277  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10278  {
10279  const VmaSuballocation& suballoc = suballocations2nd[i];
10280  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10281  {
10282  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10283  {
10284  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10285  return VK_ERROR_VALIDATION_FAILED_EXT;
10286  }
10287  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10288  {
10289  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10290  return VK_ERROR_VALIDATION_FAILED_EXT;
10291  }
10292  }
10293  }
10294 
10295  return VK_SUCCESS;
10296 }
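// Usage sketch (illustrative): CheckCorruption() above validates the magic
// values written into the VMA_DEBUG_MARGIN around each allocation. Both macros
// must be defined before including this header in the file that defines
// VMA_IMPLEMENTATION, e.g.:
//
//   #define VMA_DEBUG_MARGIN 16
//   #define VMA_DEBUG_DETECT_CORRUPTION 1
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"
//
// Then, e.g. once per frame (UINT32_MAX means "check all memory types"):
static void CheckHeapCorruption(VmaAllocator allocator)
{
    const VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
    // VK_ERROR_FEATURE_NOT_PRESENT: no checked memory type supports the margin.
    // VK_ERROR_VALIDATION_FAILED_EXT: corruption detected (also asserts).
    VMA_ASSERT(res == VK_SUCCESS || res == VK_ERROR_FEATURE_NOT_PRESENT);
}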
10297 
10298 void VmaBlockMetadata_Linear::Alloc(
10299  const VmaAllocationRequest& request,
10300  VmaSuballocationType type,
10301  VkDeviceSize allocSize,
10302  VmaAllocation hAllocation)
10303 {
10304  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10305 
10306  switch(request.type)
10307  {
10308  case VmaAllocationRequestType::UpperAddress:
10309  {
10310  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10311  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10312  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10313  suballocations2nd.push_back(newSuballoc);
10314  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10315  }
10316  break;
10317  case VmaAllocationRequestType::EndOf1st:
10318  {
10319  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10320 
10321  VMA_ASSERT(suballocations1st.empty() ||
10322  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10323  // Check if it fits before the end of the block.
10324  VMA_ASSERT(request.offset + allocSize <= GetSize());
10325 
10326  suballocations1st.push_back(newSuballoc);
10327  }
10328  break;
10329  case VmaAllocationRequestType::EndOf2nd:
10330  {
10331  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10332  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10333  VMA_ASSERT(!suballocations1st.empty() &&
10334  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10335  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10336 
10337  switch(m_2ndVectorMode)
10338  {
10339  case SECOND_VECTOR_EMPTY:
10340  // First allocation from second part ring buffer.
10341  VMA_ASSERT(suballocations2nd.empty());
10342  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10343  break;
10344  case SECOND_VECTOR_RING_BUFFER:
10345  // 2-part ring buffer is already started.
10346  VMA_ASSERT(!suballocations2nd.empty());
10347  break;
10348  case SECOND_VECTOR_DOUBLE_STACK:
10349  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10350  break;
10351  default:
10352  VMA_ASSERT(0);
10353  }
10354 
10355  suballocations2nd.push_back(newSuballoc);
10356  }
10357  break;
10358  default:
10359  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10360  }
10361 
10362  m_SumFreeSize -= newSuballoc.size;
10363 }
10364 
10365 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10366 {
10367  FreeAtOffset(allocation->GetOffset());
10368 }
10369 
10370 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10371 {
10372  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10373  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10374 
10375  if(!suballocations1st.empty())
10376  {
10377  // First (oldest) allocation: mark it as free and extend the null-items prefix.
10378  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10379  if(firstSuballoc.offset == offset)
10380  {
10381  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10382  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10383  m_SumFreeSize += firstSuballoc.size;
10384  ++m_1stNullItemsBeginCount;
10385  CleanupAfterFree();
10386  return;
10387  }
10388  }
10389 
10390  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10391  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10392  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10393  {
10394  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10395  if(lastSuballoc.offset == offset)
10396  {
10397  m_SumFreeSize += lastSuballoc.size;
10398  suballocations2nd.pop_back();
10399  CleanupAfterFree();
10400  return;
10401  }
10402  }
10403  // Last allocation in 1st vector.
10404  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10405  {
10406  VmaSuballocation& lastSuballoc = suballocations1st.back();
10407  if(lastSuballoc.offset == offset)
10408  {
10409  m_SumFreeSize += lastSuballoc.size;
10410  suballocations1st.pop_back();
10411  CleanupAfterFree();
10412  return;
10413  }
10414  }
10415 
10416  // Item from the middle of 1st vector.
10417  {
10418  VmaSuballocation refSuballoc;
10419  refSuballoc.offset = offset;
10420  // The rest of the members intentionally stay uninitialized for better performance.
10421  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10422  suballocations1st.begin() + m_1stNullItemsBeginCount,
10423  suballocations1st.end(),
10424  refSuballoc);
10425  if(it != suballocations1st.end())
10426  {
10427  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10428  it->hAllocation = VK_NULL_HANDLE;
10429  ++m_1stNullItemsMiddleCount;
10430  m_SumFreeSize += it->size;
10431  CleanupAfterFree();
10432  return;
10433  }
10434  }
10435 
10436  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10437  {
10438  // Item from the middle of 2nd vector.
10439  VmaSuballocation refSuballoc;
10440  refSuballoc.offset = offset;
10441  // The rest of the members intentionally stay uninitialized for better performance.
10442  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10443  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10444  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10445  if(it != suballocations2nd.end())
10446  {
10447  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10448  it->hAllocation = VK_NULL_HANDLE;
10449  ++m_2ndNullItemsCount;
10450  m_SumFreeSize += it->size;
10451  CleanupAfterFree();
10452  return;
10453  }
10454  }
10455 
10456  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10457 }
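// For reference (a standard-library sketch, not the code this file uses): the
// VmaVectorFindSorted() calls above perform a binary search over a vector kept
// sorted by offset, equivalent in spirit to:
#include <algorithm> // assumed acceptable here; shown only for the sketch
template<typename It>
static It FindSuballocByOffset(It beg, It end, VkDeviceSize offset)
{
    It it = std::lower_bound(beg, end, offset,
        [](const VmaSuballocation& s, VkDeviceSize off) { return s.offset < off; });
    return (it != end && it->offset == offset) ? it : end; // end == not found
}
// This keeps freeing an item from the middle of a vector at O(log n) search cost.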
10458 
10459 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10460 {
10461  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10462  const size_t suballocCount = AccessSuballocations1st().size();
10463  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10464 }
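// Worked example (illustrative): with 100 suballocations of which 60 are null,
// the test is 60 * 2 == 120 >= (100 - 60) * 3 == 120, so compaction runs. In
// other words, the 1st vector is compacted once null items reach 1.5x the live
// items, and only when the vector is big enough (> 32) for the copy to pay off.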
10465 
10466 void VmaBlockMetadata_Linear::CleanupAfterFree()
10467 {
10468  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10469  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10470 
10471  if(IsEmpty())
10472  {
10473  suballocations1st.clear();
10474  suballocations2nd.clear();
10475  m_1stNullItemsBeginCount = 0;
10476  m_1stNullItemsMiddleCount = 0;
10477  m_2ndNullItemsCount = 0;
10478  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10479  }
10480  else
10481  {
10482  const size_t suballoc1stCount = suballocations1st.size();
10483  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10484  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10485 
10486  // Find more null items at the beginning of 1st vector.
10487  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10488  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10489  {
10490  ++m_1stNullItemsBeginCount;
10491  --m_1stNullItemsMiddleCount;
10492  }
10493 
10494  // Find more null items at the end of 1st vector.
10495  while(m_1stNullItemsMiddleCount > 0 &&
10496  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10497  {
10498  --m_1stNullItemsMiddleCount;
10499  suballocations1st.pop_back();
10500  }
10501 
10502  // Find more null items at the end of 2nd vector.
10503  while(m_2ndNullItemsCount > 0 &&
10504  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10505  {
10506  --m_2ndNullItemsCount;
10507  suballocations2nd.pop_back();
10508  }
10509 
10510  // Find more null items at the beginning of 2nd vector.
10511  while(m_2ndNullItemsCount > 0 &&
10512  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10513  {
10514  --m_2ndNullItemsCount;
10515  suballocations2nd.remove(0);
10516  }
10517 
10518  if(ShouldCompact1st())
10519  {
10520  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10521  size_t srcIndex = m_1stNullItemsBeginCount;
10522  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10523  {
10524  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10525  {
10526  ++srcIndex;
10527  }
10528  if(dstIndex != srcIndex)
10529  {
10530  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10531  }
10532  ++srcIndex;
10533  }
10534  suballocations1st.resize(nonNullItemCount);
10535  m_1stNullItemsBeginCount = 0;
10536  m_1stNullItemsMiddleCount = 0;
10537  }
10538 
10539  // 2nd vector became empty.
10540  if(suballocations2nd.empty())
10541  {
10542  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10543  }
10544 
10545  // 1st vector became empty.
10546  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10547  {
10548  suballocations1st.clear();
10549  m_1stNullItemsBeginCount = 0;
10550 
10551  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10552  {
10553  // Swap 1st with 2nd. Now 2nd is empty.
10554  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10555  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10556  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10557  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10558  {
10559  ++m_1stNullItemsBeginCount;
10560  --m_1stNullItemsMiddleCount;
10561  }
10562  m_2ndNullItemsCount = 0;
10563  m_1stVectorIndex ^= 1;
10564  }
10565  }
10566  }
10567 
10568  VMA_HEAVY_ASSERT(Validate());
10569 }
10570 
10571 
10572 ////////////////////////////////////////////////////////////////////////////////
10573 // class VmaBlockMetadata_Buddy
10574 
10575 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10576  VmaBlockMetadata(hAllocator),
10577  m_Root(VMA_NULL),
10578  m_AllocationCount(0),
10579  m_FreeCount(1),
10580  m_SumFreeSize(0)
10581 {
10582  memset(m_FreeList, 0, sizeof(m_FreeList));
10583 }
10584 
10585 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10586 {
10587  DeleteNode(m_Root);
10588 }
10589 
10590 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10591 {
10592  VmaBlockMetadata::Init(size);
10593 
10594  m_UsableSize = VmaPrevPow2(size);
10595  m_SumFreeSize = m_UsableSize;
10596 
10597  // Calculate m_LevelCount.
10598  m_LevelCount = 1;
10599  while(m_LevelCount < MAX_LEVELS &&
10600  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10601  {
10602  ++m_LevelCount;
10603  }
10604 
10605  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10606  rootNode->offset = 0;
10607  rootNode->type = Node::TYPE_FREE;
10608  rootNode->parent = VMA_NULL;
10609  rootNode->buddy = VMA_NULL;
10610 
10611  m_Root = rootNode;
10612  AddToFreeListFront(0, rootNode);
10613 }
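// Worked example (illustrative): Init(100) sets m_UsableSize = VmaPrevPow2(100)
// == 64, leaving 36 bytes of unusable tail (reported via GetUnusableSize()).
// With LevelToNodeSize(level) == m_UsableSize >> level and assuming
// MIN_NODE_SIZE == 32, the loop ends with m_LevelCount == 2: level 0 holds one
// 64-byte node, level 1 holds 32-byte nodes, and 16-byte nodes are never created.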
10614 
10615 bool VmaBlockMetadata_Buddy::Validate() const
10616 {
10617  // Validate tree.
10618  ValidationContext ctx;
10619  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10620  {
10621  VMA_VALIDATE(false && "ValidateNode failed.");
10622  }
10623  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10624  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10625 
10626  // Validate free node lists.
10627  for(uint32_t level = 0; level < m_LevelCount; ++level)
10628  {
10629  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10630  m_FreeList[level].front->free.prev == VMA_NULL);
10631 
10632  for(Node* node = m_FreeList[level].front;
10633  node != VMA_NULL;
10634  node = node->free.next)
10635  {
10636  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10637 
10638  if(node->free.next == VMA_NULL)
10639  {
10640  VMA_VALIDATE(m_FreeList[level].back == node);
10641  }
10642  else
10643  {
10644  VMA_VALIDATE(node->free.next->free.prev == node);
10645  }
10646  }
10647  }
10648 
10649  // Validate that free lists at higher levels are empty.
10650  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10651  {
10652  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10653  }
10654 
10655  return true;
10656 }
10657 
10658 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10659 {
10660  for(uint32_t level = 0; level < m_LevelCount; ++level)
10661  {
10662  if(m_FreeList[level].front != VMA_NULL)
10663  {
10664  return LevelToNodeSize(level);
10665  }
10666  }
10667  return 0;
10668 }
10669 
10670 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10671 {
10672  const VkDeviceSize unusableSize = GetUnusableSize();
10673 
10674  outInfo.blockCount = 1;
10675 
10676  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10677  outInfo.usedBytes = outInfo.unusedBytes = 0;
10678 
10679  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10680  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10681  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10682 
10683  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10684 
10685  if(unusableSize > 0)
10686  {
10687  ++outInfo.unusedRangeCount;
10688  outInfo.unusedBytes += unusableSize;
10689  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10690  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10691  }
10692 }
10693 
10694 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10695 {
10696  const VkDeviceSize unusableSize = GetUnusableSize();
10697 
10698  inoutStats.size += GetSize();
10699  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10700  inoutStats.allocationCount += m_AllocationCount;
10701  inoutStats.unusedRangeCount += m_FreeCount;
10702  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10703 
10704  if(unusableSize > 0)
10705  {
10706  ++inoutStats.unusedRangeCount;
10707  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10708  }
10709 }
10710 
10711 #if VMA_STATS_STRING_ENABLED
10712 
10713 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10714 {
10715  // TODO optimize
10716  VmaStatInfo stat;
10717  CalcAllocationStatInfo(stat);
10718 
10719  PrintDetailedMap_Begin(
10720  json,
10721  stat.unusedBytes,
10722  stat.allocationCount,
10723  stat.unusedRangeCount);
10724 
10725  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10726 
10727  const VkDeviceSize unusableSize = GetUnusableSize();
10728  if(unusableSize > 0)
10729  {
10730  PrintDetailedMap_UnusedRange(json,
10731  m_UsableSize, // offset
10732  unusableSize); // size
10733  }
10734 
10735  PrintDetailedMap_End(json);
10736 }
10737 
10738 #endif // #if VMA_STATS_STRING_ENABLED
10739 
10740 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10741  uint32_t currentFrameIndex,
10742  uint32_t frameInUseCount,
10743  VkDeviceSize bufferImageGranularity,
10744  VkDeviceSize allocSize,
10745  VkDeviceSize allocAlignment,
10746  bool upperAddress,
10747  VmaSuballocationType allocType,
10748  bool canMakeOtherLost,
10749  uint32_t strategy,
10750  VmaAllocationRequest* pAllocationRequest)
10751 {
10752  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10753 
10754  // Simple way to respect bufferImageGranularity. May be optimized some day.
10755  // Whenever the allocation might be an OPTIMAL image, pad size and alignment to the granularity.
10756  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10757  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10758  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10759  {
10760  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10761  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10762  }
10763 
10764  if(allocSize > m_UsableSize)
10765  {
10766  return false;
10767  }
10768 
10769  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10770  for(uint32_t level = targetLevel + 1; level--; )
10771  {
10772  for(Node* freeNode = m_FreeList[level].front;
10773  freeNode != VMA_NULL;
10774  freeNode = freeNode->free.next)
10775  {
10776  if(freeNode->offset % allocAlignment == 0)
10777  {
10778  pAllocationRequest->type = VmaAllocationRequestType::Normal;
10779  pAllocationRequest->offset = freeNode->offset;
10780  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10781  pAllocationRequest->sumItemSize = 0;
10782  pAllocationRequest->itemsToMakeLostCount = 0;
10783  pAllocationRequest->customData = (void*)(uintptr_t)level;
10784  return true;
10785  }
10786  }
10787  }
10788 
10789  return false;
10790 }
10791 
10792 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10793  uint32_t currentFrameIndex,
10794  uint32_t frameInUseCount,
10795  VmaAllocationRequest* pAllocationRequest)
10796 {
10797  /*
10798  Lost allocations are not supported in buddy allocator at the moment.
10799  Support might be added in the future.
10800  */
10801  return pAllocationRequest->itemsToMakeLostCount == 0;
10802 }
10803 
10804 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10805 {
10806  /*
10807  Lost allocations are not supported in buddy allocator at the moment.
10808  Support might be added in the future.
10809  */
10810  return 0;
10811 }
10812 
10813 void VmaBlockMetadata_Buddy::Alloc(
10814  const VmaAllocationRequest& request,
10815  VmaSuballocationType type,
10816  VkDeviceSize allocSize,
10817  VmaAllocation hAllocation)
10818 {
10819  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
10820 
10821  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10822  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10823 
10824  Node* currNode = m_FreeList[currLevel].front;
10825  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10826  while(currNode->offset != request.offset)
10827  {
10828  currNode = currNode->free.next;
10829  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10830  }
10831 
10832  // Go down, splitting free nodes.
10833  while(currLevel < targetLevel)
10834  {
10835  // currNode is already first free node at currLevel.
10836  // Remove it from list of free nodes at this currLevel.
10837  RemoveFromFreeList(currLevel, currNode);
10838 
10839  const uint32_t childrenLevel = currLevel + 1;
10840 
10841  // Create two free sub-nodes.
10842  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10843  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10844 
10845  leftChild->offset = currNode->offset;
10846  leftChild->type = Node::TYPE_FREE;
10847  leftChild->parent = currNode;
10848  leftChild->buddy = rightChild;
10849 
10850  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10851  rightChild->type = Node::TYPE_FREE;
10852  rightChild->parent = currNode;
10853  rightChild->buddy = leftChild;
10854 
10855  // Convert current currNode to split type.
10856  currNode->type = Node::TYPE_SPLIT;
10857  currNode->split.leftChild = leftChild;
10858 
10859  // Add child nodes to free list. Order is important!
10860  AddToFreeListFront(childrenLevel, rightChild);
10861  AddToFreeListFront(childrenLevel, leftChild);
10862 
10863  ++m_FreeCount;
10864  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10865  ++currLevel;
10866  currNode = m_FreeList[currLevel].front;
10867 
10868  /*
10869  We can be sure that currNode, as left child of node previously split,
10870  also fulfills the alignment requirement.
10871  */
10872  }
10873 
10874  // Remove from free list.
10875  VMA_ASSERT(currLevel == targetLevel &&
10876  currNode != VMA_NULL &&
10877  currNode->type == Node::TYPE_FREE);
10878  RemoveFromFreeList(currLevel, currNode);
10879 
10880  // Convert to allocation node.
10881  currNode->type = Node::TYPE_ALLOCATION;
10882  currNode->allocation.alloc = hAllocation;
10883 
10884  ++m_AllocationCount;
10885  --m_FreeCount;
10886  m_SumFreeSize -= allocSize;
10887 }
10888 
10889 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10890 {
10891  if(node->type == Node::TYPE_SPLIT)
10892  {
10893  DeleteNode(node->split.leftChild->buddy);
10894  DeleteNode(node->split.leftChild);
10895  }
10896 
10897  vma_delete(GetAllocationCallbacks(), node);
10898 }
10899 
10900 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10901 {
10902  VMA_VALIDATE(level < m_LevelCount);
10903  VMA_VALIDATE(curr->parent == parent);
10904  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10905  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10906  switch(curr->type)
10907  {
10908  case Node::TYPE_FREE:
10909  // curr->free.prev, next are validated separately.
10910  ctx.calculatedSumFreeSize += levelNodeSize;
10911  ++ctx.calculatedFreeCount;
10912  break;
10913  case Node::TYPE_ALLOCATION:
10914  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10915  ++ctx.calculatedAllocationCount;
10916  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10917  break;
10918  case Node::TYPE_SPLIT:
10919  {
10920  const uint32_t childrenLevel = level + 1;
10921  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10922  const Node* const leftChild = curr->split.leftChild;
10923  VMA_VALIDATE(leftChild != VMA_NULL);
10924  VMA_VALIDATE(leftChild->offset == curr->offset);
10925  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10926  {
10927  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10928  }
10929  const Node* const rightChild = leftChild->buddy;
10930  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10931  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10932  {
10933  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10934  }
10935  }
10936  break;
10937  default:
10938  return false;
10939  }
10940 
10941  return true;
10942 }
10943 
10944 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10945 {
10946  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10947  uint32_t level = 0;
10948  VkDeviceSize currLevelNodeSize = m_UsableSize;
10949  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10950  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10951  {
10952  ++level;
10953  currLevelNodeSize = nextLevelNodeSize;
10954  nextLevelNodeSize = currLevelNodeSize >> 1;
10955  }
10956  return level;
10957 }
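// Worked example (illustrative): with m_UsableSize == 256, AllocSizeToLevel(40)
// iterates 40 <= 128 -> level 1, 40 <= 64 -> level 2, then stops because
// 40 <= 32 fails. The result, level 2, corresponds to a 64-byte node - the
// smallest power-of-2 node that can hold 40 bytes.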
10958 
10959 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10960 {
10961  // Find node and level.
10962  Node* node = m_Root;
10963  VkDeviceSize nodeOffset = 0;
10964  uint32_t level = 0;
10965  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10966  while(node->type == Node::TYPE_SPLIT)
10967  {
10968  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10969  if(offset < nodeOffset + nextLevelSize)
10970  {
10971  node = node->split.leftChild;
10972  }
10973  else
10974  {
10975  node = node->split.leftChild->buddy;
10976  nodeOffset += nextLevelSize;
10977  }
10978  ++level;
10979  levelNodeSize = nextLevelSize;
10980  }
10981 
10982  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10983  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10984 
10985  ++m_FreeCount;
10986  --m_AllocationCount;
10987  m_SumFreeSize += alloc->GetSize();
10988 
10989  node->type = Node::TYPE_FREE;
10990 
10991  // Join free nodes if possible.
10992  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10993  {
10994  RemoveFromFreeList(level, node->buddy);
10995  Node* const parent = node->parent;
10996 
10997  vma_delete(GetAllocationCallbacks(), node->buddy);
10998  vma_delete(GetAllocationCallbacks(), node);
10999  parent->type = Node::TYPE_FREE;
11000 
11001  node = parent;
11002  --level;
11003  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11004  --m_FreeCount;
11005  }
11006 
11007  AddToFreeListFront(level, node);
11008 }
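// Worked example (illustrative): with m_UsableSize == 256, freeing a 32-byte
// node at level 3 whose buddy is also free merges both into their 64-byte
// parent at level 2; if that parent's buddy is free as well, they merge into a
// 128-byte node at level 1, and so on. The loop above walks up until it meets
// an allocated or split buddy, so free space stays maximally coalesced.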
11009 
11010 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11011 {
11012  switch(node->type)
11013  {
11014  case Node::TYPE_FREE:
11015  ++outInfo.unusedRangeCount;
11016  outInfo.unusedBytes += levelNodeSize;
11017  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11018  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11019  break;
11020  case Node::TYPE_ALLOCATION:
11021  {
11022  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11023  ++outInfo.allocationCount;
11024  outInfo.usedBytes += allocSize;
11025  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11026  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11027 
11028  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11029  if(unusedRangeSize > 0)
11030  {
11031  ++outInfo.unusedRangeCount;
11032  outInfo.unusedBytes += unusedRangeSize;
11033  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11034  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11035  }
11036  }
11037  break;
11038  case Node::TYPE_SPLIT:
11039  {
11040  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11041  const Node* const leftChild = node->split.leftChild;
11042  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11043  const Node* const rightChild = leftChild->buddy;
11044  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11045  }
11046  break;
11047  default:
11048  VMA_ASSERT(0);
11049  }
11050 }
11051 
11052 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11053 {
11054  VMA_ASSERT(node->type == Node::TYPE_FREE);
11055 
11056  // List is empty.
11057  Node* const frontNode = m_FreeList[level].front;
11058  if(frontNode == VMA_NULL)
11059  {
11060  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11061  node->free.prev = node->free.next = VMA_NULL;
11062  m_FreeList[level].front = m_FreeList[level].back = node;
11063  }
11064  else
11065  {
11066  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11067  node->free.prev = VMA_NULL;
11068  node->free.next = frontNode;
11069  frontNode->free.prev = node;
11070  m_FreeList[level].front = node;
11071  }
11072 }
11073 
11074 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11075 {
11076  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11077 
11078  // It is at the front.
11079  if(node->free.prev == VMA_NULL)
11080  {
11081  VMA_ASSERT(m_FreeList[level].front == node);
11082  m_FreeList[level].front = node->free.next;
11083  }
11084  else
11085  {
11086  Node* const prevFreeNode = node->free.prev;
11087  VMA_ASSERT(prevFreeNode->free.next == node);
11088  prevFreeNode->free.next = node->free.next;
11089  }
11090 
11091  // It is at the back.
11092  if(node->free.next == VMA_NULL)
11093  {
11094  VMA_ASSERT(m_FreeList[level].back == node);
11095  m_FreeList[level].back = node->free.prev;
11096  }
11097  else
11098  {
11099  Node* const nextFreeNode = node->free.next;
11100  VMA_ASSERT(nextFreeNode->free.prev == node);
11101  nextFreeNode->free.prev = node->free.prev;
11102  }
11103 }
11104 
11105 #if VMA_STATS_STRING_ENABLED
11106 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11107 {
11108  switch(node->type)
11109  {
11110  case Node::TYPE_FREE:
11111  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11112  break;
11113  case Node::TYPE_ALLOCATION:
11114  {
11115  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11116  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11117  if(allocSize < levelNodeSize)
11118  {
11119  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11120  }
11121  }
11122  break;
11123  case Node::TYPE_SPLIT:
11124  {
11125  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11126  const Node* const leftChild = node->split.leftChild;
11127  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11128  const Node* const rightChild = leftChild->buddy;
11129  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11130  }
11131  break;
11132  default:
11133  VMA_ASSERT(0);
11134  }
11135 }
11136 #endif // #if VMA_STATS_STRING_ENABLED
11137 
11138 
11138 
11139 ////////////////////////////////////////////////////////////////////////////////
11140 // class VmaDeviceMemoryBlock
11141 
11142 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11143  m_pMetadata(VMA_NULL),
11144  m_MemoryTypeIndex(UINT32_MAX),
11145  m_Id(0),
11146  m_hMemory(VK_NULL_HANDLE),
11147  m_MapCount(0),
11148  m_pMappedData(VMA_NULL)
11149 {
11150 }
11151 
11152 void VmaDeviceMemoryBlock::Init(
11153  VmaAllocator hAllocator,
11154  VmaPool hParentPool,
11155  uint32_t newMemoryTypeIndex,
11156  VkDeviceMemory newMemory,
11157  VkDeviceSize newSize,
11158  uint32_t id,
11159  uint32_t algorithm)
11160 {
11161  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11162 
11163  m_hParentPool = hParentPool;
11164  m_MemoryTypeIndex = newMemoryTypeIndex;
11165  m_Id = id;
11166  m_hMemory = newMemory;
11167 
11168  switch(algorithm)
11169  {
11170  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11171  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11172  break;
11173  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11174  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11175  break;
11176  default:
11177  VMA_ASSERT(0);
11178  // Fall-through.
11179  case 0:
11180  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11181  }
11182  m_pMetadata->Init(newSize);
11183 }
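// Usage sketch (illustrative): the metadata class instantiated by Init() above
// is selected by the algorithm bits of the owning custom pool. `allocator` and
// `memTypeIndex` are assumed to be prepared by the caller.
static VkResult CreateBuddyPool(VmaAllocator allocator, uint32_t memTypeIndex,
    VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    // VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT  -> VmaBlockMetadata_Buddy
    // VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT -> VmaBlockMetadata_Linear
    // no algorithm bit (0)                 -> VmaBlockMetadata_Generic
    poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}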
11184 
11185 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11186 {
11187  // This is the most important assert in the entire library.
11188  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11189  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11190 
11191  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11192  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11193  m_hMemory = VK_NULL_HANDLE;
11194 
11195  vma_delete(allocator, m_pMetadata);
11196  m_pMetadata = VMA_NULL;
11197 }
11198 
11199 bool VmaDeviceMemoryBlock::Validate() const
11200 {
11201  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11202  (m_pMetadata->GetSize() != 0));
11203 
11204  return m_pMetadata->Validate();
11205 }
11206 
11207 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11208 {
11209  void* pData = nullptr;
11210  VkResult res = Map(hAllocator, 1, &pData);
11211  if(res != VK_SUCCESS)
11212  {
11213  return res;
11214  }
11215 
11216  res = m_pMetadata->CheckCorruption(pData);
11217 
11218  Unmap(hAllocator, 1);
11219 
11220  return res;
11221 }
11222 
11223 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11224 {
11225  if(count == 0)
11226  {
11227  return VK_SUCCESS;
11228  }
11229 
11230  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11231  if(m_MapCount != 0)
11232  {
11233  m_MapCount += count;
11234  VMA_ASSERT(m_pMappedData != VMA_NULL);
11235  if(ppData != VMA_NULL)
11236  {
11237  *ppData = m_pMappedData;
11238  }
11239  return VK_SUCCESS;
11240  }
11241  else
11242  {
11243  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11244  hAllocator->m_hDevice,
11245  m_hMemory,
11246  0, // offset
11247  VK_WHOLE_SIZE,
11248  0, // flags
11249  &m_pMappedData);
11250  if(result == VK_SUCCESS)
11251  {
11252  if(ppData != VMA_NULL)
11253  {
11254  *ppData = m_pMappedData;
11255  }
11256  m_MapCount = count;
11257  }
11258  return result;
11259  }
11260 }
11261 
11262 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11263 {
11264  if(count == 0)
11265  {
11266  return;
11267  }
11268 
11269  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11270  if(m_MapCount >= count)
11271  {
11272  m_MapCount -= count;
11273  if(m_MapCount == 0)
11274  {
11275  m_pMappedData = VMA_NULL;
11276  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11277  }
11278  }
11279  else
11280  {
11281  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11282  }
11283 }
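// Usage sketch (illustrative): Map()/Unmap() above reference-count mapping per
// block, so the public API below can be used on several allocations that share
// one VkDeviceMemory; vkUnmapMemory() runs only when the count drops to zero.
// A valid `allocator` and a host-visible `alloc` are assumed.
static VkResult UploadToAllocation(VmaAllocator allocator, VmaAllocation alloc,
    const void* src, size_t size)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &pData); // map count + 1
    if(res != VK_SUCCESS)
        return res;
    memcpy(pData, src, size);
    vmaUnmapMemory(allocator, alloc); // map count - 1
    return VK_SUCCESS;
}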
11284 
11285 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11286 {
11287  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11288  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11289 
11290  void* pData;
11291  VkResult res = Map(hAllocator, 1, &pData);
11292  if(res != VK_SUCCESS)
11293  {
11294  return res;
11295  }
11296 
11297  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11298  VmaWriteMagicValue(pData, allocOffset + allocSize);
11299 
11300  Unmap(hAllocator, 1);
11301 
11302  return VK_SUCCESS;
11303 }
11304 
11305 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11306 {
11307  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11308  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11309 
11310  void* pData;
11311  VkResult res = Map(hAllocator, 1, &pData);
11312  if(res != VK_SUCCESS)
11313  {
11314  return res;
11315  }
11316 
11317  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11318  {
11319  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11320  }
11321  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11322  {
11323  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11324  }
11325 
11326  Unmap(hAllocator, 1);
11327 
11328  return VK_SUCCESS;
11329 }
11330 
11331 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11332  const VmaAllocator hAllocator,
11333  const VmaAllocation hAllocation,
11334  VkBuffer hBuffer)
11335 {
11336  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11337  hAllocation->GetBlock() == this);
11338  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11339  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11340  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11341  hAllocator->m_hDevice,
11342  hBuffer,
11343  m_hMemory,
11344  hAllocation->GetOffset());
11345 }
11346 
11347 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11348  const VmaAllocator hAllocator,
11349  const VmaAllocation hAllocation,
11350  VkImage hImage)
11351 {
11352  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11353  hAllocation->GetBlock() == this);
11354  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11355  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11356  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11357  hAllocator->m_hDevice,
11358  hImage,
11359  m_hMemory,
11360  hAllocation->GetOffset());
11361 }
11362 
11363 static void InitStatInfo(VmaStatInfo& outInfo)
11364 {
11365  memset(&outInfo, 0, sizeof(outInfo));
11366  outInfo.allocationSizeMin = UINT64_MAX;
11367  outInfo.unusedRangeSizeMin = UINT64_MAX;
11368 }
11369 
11370 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11371 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11372 {
11373  inoutInfo.blockCount += srcInfo.blockCount;
11374  inoutInfo.allocationCount += srcInfo.allocationCount;
11375  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11376  inoutInfo.usedBytes += srcInfo.usedBytes;
11377  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11378  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11379  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11380  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11381  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11382 }
11383 
11384 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11385 {
11386  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11387  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11388  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11389  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11390 }
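// Worked example (illustrative), assuming VmaRoundDiv<T>(x, y) computes
// (x + y/2) / y, i.e. integer division rounded to nearest: with
// usedBytes == 1000 and allocationCount == 3, allocationSizeAvg ==
// (1000 + 1) / 3 == 333, matching the true average of 333.33.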
11391 
11392 VmaPool_T::VmaPool_T(
11393  VmaAllocator hAllocator,
11394  const VmaPoolCreateInfo& createInfo,
11395  VkDeviceSize preferredBlockSize) :
11396  m_BlockVector(
11397  hAllocator,
11398  this, // hParentPool
11399  createInfo.memoryTypeIndex,
11400  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11401  createInfo.minBlockCount,
11402  createInfo.maxBlockCount,
11403  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11404  createInfo.frameInUseCount,
11405  true, // isCustomPool
11406  createInfo.blockSize != 0, // explicitBlockSize
11407  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11408  m_Id(0)
11409 {
11410 }
11411 
11412 VmaPool_T::~VmaPool_T()
11413 {
11414 }
11415 
11416 #if VMA_STATS_STRING_ENABLED
11417 
11418 #endif // #if VMA_STATS_STRING_ENABLED
11419 
11420 VmaBlockVector::VmaBlockVector(
11421  VmaAllocator hAllocator,
11422  VmaPool hParentPool,
11423  uint32_t memoryTypeIndex,
11424  VkDeviceSize preferredBlockSize,
11425  size_t minBlockCount,
11426  size_t maxBlockCount,
11427  VkDeviceSize bufferImageGranularity,
11428  uint32_t frameInUseCount,
11429  bool isCustomPool,
11430  bool explicitBlockSize,
11431  uint32_t algorithm) :
11432  m_hAllocator(hAllocator),
11433  m_hParentPool(hParentPool),
11434  m_MemoryTypeIndex(memoryTypeIndex),
11435  m_PreferredBlockSize(preferredBlockSize),
11436  m_MinBlockCount(minBlockCount),
11437  m_MaxBlockCount(maxBlockCount),
11438  m_BufferImageGranularity(bufferImageGranularity),
11439  m_FrameInUseCount(frameInUseCount),
11440  m_IsCustomPool(isCustomPool),
11441  m_ExplicitBlockSize(explicitBlockSize),
11442  m_Algorithm(algorithm),
11443  m_HasEmptyBlock(false),
11444  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11445  m_NextBlockId(0)
11446 {
11447 }
11448 
11449 VmaBlockVector::~VmaBlockVector()
11450 {
11451  for(size_t i = m_Blocks.size(); i--; )
11452  {
11453  m_Blocks[i]->Destroy(m_hAllocator);
11454  vma_delete(m_hAllocator, m_Blocks[i]);
11455  }
11456 }
11457 
11458 VkResult VmaBlockVector::CreateMinBlocks()
11459 {
11460  for(size_t i = 0; i < m_MinBlockCount; ++i)
11461  {
11462  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11463  if(res != VK_SUCCESS)
11464  {
11465  return res;
11466  }
11467  }
11468  return VK_SUCCESS;
11469 }
11470 
11471 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11472 {
11473  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11474 
11475  const size_t blockCount = m_Blocks.size();
11476 
11477  pStats->size = 0;
11478  pStats->unusedSize = 0;
11479  pStats->allocationCount = 0;
11480  pStats->unusedRangeCount = 0;
11481  pStats->unusedRangeSizeMax = 0;
11482  pStats->blockCount = blockCount;
11483 
11484  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11485  {
11486  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11487  VMA_ASSERT(pBlock);
11488  VMA_HEAVY_ASSERT(pBlock->Validate());
11489  pBlock->m_pMetadata->AddPoolStats(*pStats);
11490  }
11491 }
11492 
11493 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11494 {
11495  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11496  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11497  (VMA_DEBUG_MARGIN > 0) &&
11498  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11499  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11500 }
11501 
11502 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11503 
11504 VkResult VmaBlockVector::Allocate(
11505  uint32_t currentFrameIndex,
11506  VkDeviceSize size,
11507  VkDeviceSize alignment,
11508  const VmaAllocationCreateInfo& createInfo,
11509  VmaSuballocationType suballocType,
11510  size_t allocationCount,
11511  VmaAllocation* pAllocations)
11512 {
11513  size_t allocIndex;
11514  VkResult res = VK_SUCCESS;
11515 
11516  if(IsCorruptionDetectionEnabled())
11517  {
11518  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11519  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11520  }
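// (Rounding both size and alignment up to the size of the magic value keeps
// the margins before and after the allocation whole multiples of it, so the
// corruption-detection code can write and validate the magic value as whole
// uint32_t words.)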
11521 
11522  {
11523  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11524  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11525  {
11526  res = AllocatePage(
11527  currentFrameIndex,
11528  size,
11529  alignment,
11530  createInfo,
11531  suballocType,
11532  pAllocations + allocIndex);
11533  if(res != VK_SUCCESS)
11534  {
11535  break;
11536  }
11537  }
11538  }
11539 
11540  if(res != VK_SUCCESS)
11541  {
11542  // Free all already created allocations.
11543  while(allocIndex--)
11544  {
11545  Free(pAllocations[allocIndex]);
11546  }
11547  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11548  }
11549 
11550  return res;
11551 }
11552 
11553 VkResult VmaBlockVector::AllocatePage(
11554  uint32_t currentFrameIndex,
11555  VkDeviceSize size,
11556  VkDeviceSize alignment,
11557  const VmaAllocationCreateInfo& createInfo,
11558  VmaSuballocationType suballocType,
11559  VmaAllocation* pAllocation)
11560 {
11561  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11562  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11563  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11564  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11565  const bool canCreateNewBlock =
11566  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11567  (m_Blocks.size() < m_MaxBlockCount);
11568  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11569 
11570  // If linearAlgorithm is used, canMakeOtherLost is available only when used as a ring buffer,
11571  // which in turn is available only when maxBlockCount = 1.
11572  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11573  {
11574  canMakeOtherLost = false;
11575  }
11576 
11577  // Upper address can only be used with the linear allocator and within a single memory block.
11578  if(isUpperAddress &&
11579  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11580  {
11581  return VK_ERROR_FEATURE_NOT_PRESENT;
11582  }
11583 
11584  // Validate strategy.
11585  switch(strategy)
11586  {
11587  case 0:
11588  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11589  break;
11590  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11591  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11592  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11593  break;
11594  default:
11595  return VK_ERROR_FEATURE_NOT_PRESENT;
11596  }
11597 
11598  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11599  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11600  {
11601  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11602  }
11603 
11604  /*
11605  Under certain conditions, this whole section can be skipped for optimization, so
11606  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11607  e.g. for custom pools with the linear algorithm.
11608  */
11609  if(!canMakeOtherLost || canCreateNewBlock)
11610  {
11611  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11612  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11613  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11614 
11615  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11616  {
11617  // Use only last block.
11618  if(!m_Blocks.empty())
11619  {
11620  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11621  VMA_ASSERT(pCurrBlock);
11622  VkResult res = AllocateFromBlock(
11623  pCurrBlock,
11624  currentFrameIndex,
11625  size,
11626  alignment,
11627  allocFlagsCopy,
11628  createInfo.pUserData,
11629  suballocType,
11630  strategy,
11631  pAllocation);
11632  if(res == VK_SUCCESS)
11633  {
11634  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11635  return VK_SUCCESS;
11636  }
11637  }
11638  }
11639  else
11640  {
11641  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11642  {
11643  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11644  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11645  {
11646  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11647  VMA_ASSERT(pCurrBlock);
11648  VkResult res = AllocateFromBlock(
11649  pCurrBlock,
11650  currentFrameIndex,
11651  size,
11652  alignment,
11653  allocFlagsCopy,
11654  createInfo.pUserData,
11655  suballocType,
11656  strategy,
11657  pAllocation);
11658  if(res == VK_SUCCESS)
11659  {
11660  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11661  return VK_SUCCESS;
11662  }
11663  }
11664  }
11665  else // WORST_FIT, FIRST_FIT
11666  {
11667  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11668  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11669  {
11670  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11671  VMA_ASSERT(pCurrBlock);
11672  VkResult res = AllocateFromBlock(
11673  pCurrBlock,
11674  currentFrameIndex,
11675  size,
11676  alignment,
11677  allocFlagsCopy,
11678  createInfo.pUserData,
11679  suballocType,
11680  strategy,
11681  pAllocation);
11682  if(res == VK_SUCCESS)
11683  {
11684  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11685  return VK_SUCCESS;
11686  }
11687  }
11688  }
11689  }
11690 
11691  // 2. Try to create new block.
11692  if(canCreateNewBlock)
11693  {
11694  // Calculate optimal size for new block.
11695  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11696  uint32_t newBlockSizeShift = 0;
11697  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11698 
11699  if(!m_ExplicitBlockSize)
11700  {
11701  // Allocate 1/8, 1/4, 1/2 as first blocks.
11702  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11703  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11704  {
11705  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11706  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11707  {
11708  newBlockSize = smallerNewBlockSize;
11709  ++newBlockSizeShift;
11710  }
11711  else
11712  {
11713  break;
11714  }
11715  }
11716  }
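// Example (a sketch, assuming m_PreferredBlockSize = 256 MiB, no existing
// blocks, and a 2 MiB request): the loop above halves 256 -> 128 -> 64 -> 32,
// i.e. the full NEW_BLOCK_SIZE_SHIFT_MAX = 3 steps, since each halved size is
// still > maxExistingBlockSize (0) and >= 2 * size (4 MiB), so the first
// block is created with 32 MiB.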
11717 
11718  size_t newBlockIndex = 0;
11719  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11720  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11721  if(!m_ExplicitBlockSize)
11722  {
11723  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11724  {
11725  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11726  if(smallerNewBlockSize >= size)
11727  {
11728  newBlockSize = smallerNewBlockSize;
11729  ++newBlockSizeShift;
11730  res = CreateBlock(newBlockSize, &newBlockIndex);
11731  }
11732  else
11733  {
11734  break;
11735  }
11736  }
11737  }
11738 
11739  if(res == VK_SUCCESS)
11740  {
11741  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11742  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11743 
11744  res = AllocateFromBlock(
11745  pBlock,
11746  currentFrameIndex,
11747  size,
11748  alignment,
11749  allocFlagsCopy,
11750  createInfo.pUserData,
11751  suballocType,
11752  strategy,
11753  pAllocation);
11754  if(res == VK_SUCCESS)
11755  {
11756  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11757  return VK_SUCCESS;
11758  }
11759  else
11760  {
11761  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11762  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11763  }
11764  }
11765  }
11766  }
11767 
11768  // 3. Try to allocate from existing blocks with making other allocations lost.
11769  if(canMakeOtherLost)
11770  {
11771  uint32_t tryIndex = 0;
11772  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11773  {
11774  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11775  VmaAllocationRequest bestRequest = {};
11776  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11777 
11778  // 1. Search existing allocations.
11779  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11780  {
11781  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11782  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11783  {
11784  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11785  VMA_ASSERT(pCurrBlock);
11786  VmaAllocationRequest currRequest = {};
11787  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11788  currentFrameIndex,
11789  m_FrameInUseCount,
11790  m_BufferImageGranularity,
11791  size,
11792  alignment,
11793  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11794  suballocType,
11795  canMakeOtherLost,
11796  strategy,
11797  &currRequest))
11798  {
11799  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11800  if(pBestRequestBlock == VMA_NULL ||
11801  currRequestCost < bestRequestCost)
11802  {
11803  pBestRequestBlock = pCurrBlock;
11804  bestRequest = currRequest;
11805  bestRequestCost = currRequestCost;
11806 
11807  if(bestRequestCost == 0)
11808  {
11809  break;
11810  }
11811  }
11812  }
11813  }
11814  }
11815  else // WORST_FIT, FIRST_FIT
11816  {
11817  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11818  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11819  {
11820  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11821  VMA_ASSERT(pCurrBlock);
11822  VmaAllocationRequest currRequest = {};
11823  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11824  currentFrameIndex,
11825  m_FrameInUseCount,
11826  m_BufferImageGranularity,
11827  size,
11828  alignment,
11829  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11830  suballocType,
11831  canMakeOtherLost,
11832  strategy,
11833  &currRequest))
11834  {
11835  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11836  if(pBestRequestBlock == VMA_NULL ||
11837  currRequestCost < bestRequestCost ||
11838  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11839  {
11840  pBestRequestBlock = pCurrBlock;
11841  bestRequest = currRequest;
11842  bestRequestCost = currRequestCost;
11843 
11844  if(bestRequestCost == 0 ||
11845  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11846  {
11847  break;
11848  }
11849  }
11850  }
11851  }
11852  }
11853 
11854  if(pBestRequestBlock != VMA_NULL)
11855  {
11856  if(mapped)
11857  {
11858  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11859  if(res != VK_SUCCESS)
11860  {
11861  return res;
11862  }
11863  }
11864 
11865  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11866  currentFrameIndex,
11867  m_FrameInUseCount,
11868  &bestRequest))
11869  {
11870  // We no longer have an empty block.
11871  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11872  {
11873  m_HasEmptyBlock = false;
11874  }
11875  // Allocate from this pBlock.
11876  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
11877  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
11878  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
11879  (*pAllocation)->InitBlockAllocation(
11880  pBestRequestBlock,
11881  bestRequest.offset,
11882  alignment,
11883  size,
11884  suballocType,
11885  mapped,
11886  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11887  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11888  VMA_DEBUG_LOG(" Returned from existing block");
11889  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11890  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11891  {
11892  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11893  }
11894  if(IsCorruptionDetectionEnabled())
11895  {
11896  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11897  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11898  }
11899  return VK_SUCCESS;
11900  }
11901  // else: Some allocations must have been touched while we are here. Next try.
11902  }
11903  else
11904  {
11905  // Could not find place in any of the blocks - break outer loop.
11906  break;
11907  }
11908  }
11909  /* Maximum number of tries exceeded - a very unlikely event when many other
11910  threads are simultaneously touching allocations, making it impossible to make them
11911  lost at the same time as we try to allocate. */
11912  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11913  {
11914  return VK_ERROR_TOO_MANY_OBJECTS;
11915  }
11916  }
11917 
11918  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11919 }
11920 
11921 void VmaBlockVector::Free(
11922  VmaAllocation hAllocation)
11923 {
11924  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11925 
11926  // Scope for lock.
11927  {
11928  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11929 
11930  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11931 
11932  if(IsCorruptionDetectionEnabled())
11933  {
11934  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11935  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11936  }
11937 
11938  if(hAllocation->IsPersistentMap())
11939  {
11940  pBlock->Unmap(m_hAllocator, 1);
11941  }
11942 
11943  pBlock->m_pMetadata->Free(hAllocation);
11944  VMA_HEAVY_ASSERT(pBlock->Validate());
11945 
11946  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11947 
11948  // pBlock became empty after this deallocation.
11949  if(pBlock->m_pMetadata->IsEmpty())
11950  {
11951  // We already have an empty Allocation. We don't want to have two, so delete this one.
11952  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11953  {
11954  pBlockToDelete = pBlock;
11955  Remove(pBlock);
11956  }
11957  // We now have our first empty block.
11958  else
11959  {
11960  m_HasEmptyBlock = true;
11961  }
11962  }
11963  // pBlock didn't become empty, but we have another empty block - find and free that one.
11964  // (This is optional; just a heuristic.)
11965  else if(m_HasEmptyBlock)
11966  {
11967  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11968  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11969  {
11970  pBlockToDelete = pLastBlock;
11971  m_Blocks.pop_back();
11972  m_HasEmptyBlock = false;
11973  }
11974  }
11975 
11976  IncrementallySortBlocks();
11977  }
11978 
11979  // Destruction of a free Allocation. Deferred until this point, outside of mutex
11980  // lock, for performance reasons.
11981  if(pBlockToDelete != VMA_NULL)
11982  {
11983  VMA_DEBUG_LOG(" Deleted empty allocation");
11984  pBlockToDelete->Destroy(m_hAllocator);
11985  vma_delete(m_hAllocator, pBlockToDelete);
11986  }
11987 }
11988 
11989 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11990 {
11991  VkDeviceSize result = 0;
11992  for(size_t i = m_Blocks.size(); i--; )
11993  {
11994  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11995  if(result >= m_PreferredBlockSize)
11996  {
11997  break;
11998  }
11999  }
12000  return result;
12001 }
12002 
12003 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12004 {
12005  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12006  {
12007  if(m_Blocks[blockIndex] == pBlock)
12008  {
12009  VmaVectorRemove(m_Blocks, blockIndex);
12010  return;
12011  }
12012  }
12013  VMA_ASSERT(0);
12014 }
12015 
12016 void VmaBlockVector::IncrementallySortBlocks()
12017 {
12018  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12019  {
12020  // Bubble sort only until first swap.
12021  for(size_t i = 1; i < m_Blocks.size(); ++i)
12022  {
12023  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12024  {
12025  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12026  return;
12027  }
12028  }
12029  }
12030 }
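// Note: one bubble-sort pass that stops at the first swap is enough here
// because this function runs after every Free(), so m_Blocks converges towards
// ascending order of free space - the order preferred by the forward
// (BEST_FIT) search in AllocatePage() - without paying for a full sort on each
// deallocation.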
12031 
12032 VkResult VmaBlockVector::AllocateFromBlock(
12033  VmaDeviceMemoryBlock* pBlock,
12034  uint32_t currentFrameIndex,
12035  VkDeviceSize size,
12036  VkDeviceSize alignment,
12037  VmaAllocationCreateFlags allocFlags,
12038  void* pUserData,
12039  VmaSuballocationType suballocType,
12040  uint32_t strategy,
12041  VmaAllocation* pAllocation)
12042 {
12043  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12044  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12045  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12046  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12047 
12048  VmaAllocationRequest currRequest = {};
12049  if(pBlock->m_pMetadata->CreateAllocationRequest(
12050  currentFrameIndex,
12051  m_FrameInUseCount,
12052  m_BufferImageGranularity,
12053  size,
12054  alignment,
12055  isUpperAddress,
12056  suballocType,
12057  false, // canMakeOtherLost
12058  strategy,
12059  &currRequest))
12060  {
12061  // Allocate from pCurrBlock.
12062  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12063 
12064  if(mapped)
12065  {
12066  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12067  if(res != VK_SUCCESS)
12068  {
12069  return res;
12070  }
12071  }
12072 
12073  // We no longer have an empty block.
12074  if(pBlock->m_pMetadata->IsEmpty())
12075  {
12076  m_HasEmptyBlock = false;
12077  }
12078 
12079  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12080  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12081  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12082  (*pAllocation)->InitBlockAllocation(
12083  pBlock,
12084  currRequest.offset,
12085  alignment,
12086  size,
12087  suballocType,
12088  mapped,
12089  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12090  VMA_HEAVY_ASSERT(pBlock->Validate());
12091  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12092  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12093  {
12094  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12095  }
12096  if(IsCorruptionDetectionEnabled())
12097  {
12098  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12099  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12100  }
12101  return VK_SUCCESS;
12102  }
12103  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12104 }
12105 
12106 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12107 {
12108  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12109  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12110  allocInfo.allocationSize = blockSize;
12111  VkDeviceMemory mem = VK_NULL_HANDLE;
12112  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12113  if(res < 0)
12114  {
12115  return res;
12116  }
12117 
12118  // New VkDeviceMemory successfully created.
12119 
12120  // Create a new block object for it.
12121  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12122  pBlock->Init(
12123  m_hAllocator,
12124  m_hParentPool,
12125  m_MemoryTypeIndex,
12126  mem,
12127  allocInfo.allocationSize,
12128  m_NextBlockId++,
12129  m_Algorithm);
12130 
12131  m_Blocks.push_back(pBlock);
12132  if(pNewBlockIndex != VMA_NULL)
12133  {
12134  *pNewBlockIndex = m_Blocks.size() - 1;
12135  }
12136 
12137  return VK_SUCCESS;
12138 }
12139 
12140 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12141  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12142  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12143 {
12144  const size_t blockCount = m_Blocks.size();
12145  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12146 
12147  enum BLOCK_FLAG
12148  {
12149  BLOCK_FLAG_USED = 0x00000001,
12150  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12151  };
12152 
12153  struct BlockInfo
12154  {
12155  uint32_t flags;
12156  void* pMappedData;
12157  };
12158  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12159  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12160  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12161 
12162  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12163  const size_t moveCount = moves.size();
12164  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12165  {
12166  const VmaDefragmentationMove& move = moves[moveIndex];
12167  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12168  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12169  }
12170 
12171  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12172 
12173  // Go over all blocks. Get mapped pointer or map if necessary.
12174  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12175  {
12176  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12177  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12178  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12179  {
12180  currBlockInfo.pMappedData = pBlock->GetMappedData();
12181  // It is not originally mapped - map it.
12182  if(currBlockInfo.pMappedData == VMA_NULL)
12183  {
12184  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12185  if(pDefragCtx->res == VK_SUCCESS)
12186  {
12187  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12188  }
12189  }
12190  }
12191  }
12192 
12193  // Go over all moves. Do actual data transfer.
12194  if(pDefragCtx->res == VK_SUCCESS)
12195  {
12196  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12197  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12198 
12199  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12200  {
12201  const VmaDefragmentationMove& move = moves[moveIndex];
12202 
12203  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12204  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12205 
12206  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12207 
12208  // Invalidate source.
12209  if(isNonCoherent)
12210  {
12211  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12212  memRange.memory = pSrcBlock->GetDeviceMemory();
12213  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12214  memRange.size = VMA_MIN(
12215  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12216  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12217  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12218  }
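// (The range above is expanded to nonCoherentAtomSize boundaries, as the
// Vulkan spec requires for vkInvalidateMappedMemoryRanges and
// vkFlushMappedMemoryRanges, then clamped so it doesn't run past the end of
// the block.)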
12219 
12220  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12221  memmove(
12222  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12223  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12224  static_cast<size_t>(move.size));
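// memmove (rather than memcpy) is required: for moves within a single block
// the source and destination ranges may overlap (the CPU path is chosen with
// overlappingMoveSupported = true in VmaBlockVector::Defragment()).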
12225 
12226  if(IsCorruptionDetectionEnabled())
12227  {
12228  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12229  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12230  }
12231 
12232  // Flush destination.
12233  if(isNonCoherent)
12234  {
12235  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12236  memRange.memory = pDstBlock->GetDeviceMemory();
12237  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12238  memRange.size = VMA_MIN(
12239  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12240  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12241  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12242  }
12243  }
12244  }
12245 
12246  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12247  // Do this regardless of whether pCtx->res == VK_SUCCESS.
12248  for(size_t blockIndex = blockCount; blockIndex--; )
12249  {
12250  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12251  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12252  {
12253  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12254  pBlock->Unmap(m_hAllocator, 1);
12255  }
12256  }
12257 }
12258 
12259 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12260  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12261  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12262  VkCommandBuffer commandBuffer)
12263 {
12264  const size_t blockCount = m_Blocks.size();
12265 
12266  pDefragCtx->blockContexts.resize(blockCount);
12267  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12268 
12269  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12270  const size_t moveCount = moves.size();
12271  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12272  {
12273  const VmaDefragmentationMove& move = moves[moveIndex];
12274  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12275  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12276  }
12277 
12278  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12279 
12280  // Go over all blocks. Create and bind buffer for whole block if necessary.
12281  {
12282  VkBufferCreateInfo bufCreateInfo;
12283  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
12284 
12285  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12286  {
12287  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12288  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12289  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12290  {
12291  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12292  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12293  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12294  if(pDefragCtx->res == VK_SUCCESS)
12295  {
12296  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12297  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12298  }
12299  }
12300  }
12301  }
12302 
12303  // Go over all moves. Post data transfer commands to command buffer.
12304  if(pDefragCtx->res == VK_SUCCESS)
12305  {
12306  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12307  {
12308  const VmaDefragmentationMove& move = moves[moveIndex];
12309 
12310  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12311  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12312 
12313  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12314 
12315  VkBufferCopy region = {
12316  move.srcOffset,
12317  move.dstOffset,
12318  move.size };
12319  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12320  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12321  }
12322  }
12323 
12324  // The per-block buffers stay in pDefragCtx->blockContexts for later destruction. Mark the context as not ready, because the recorded copy commands still have to execute.
12325  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12326  {
12327  pDefragCtx->res = VK_NOT_READY;
12328  }
12329 }
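// Note: the GPU path works by aliasing each used VkDeviceMemory block with a
// temporary VkBuffer bound at offset 0 and recording vkCmdCopyBuffer between
// those buffers. This is also why this path can only defragment data that is
// legal to read and write through a buffer.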
12330 
12331 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12332 {
12333  m_HasEmptyBlock = false;
12334  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12335  {
12336  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12337  if(pBlock->m_pMetadata->IsEmpty())
12338  {
12339  if(m_Blocks.size() > m_MinBlockCount)
12340  {
12341  if(pDefragmentationStats != VMA_NULL)
12342  {
12343  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12344  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12345  }
12346 
12347  VmaVectorRemove(m_Blocks, blockIndex);
12348  pBlock->Destroy(m_hAllocator);
12349  vma_delete(m_hAllocator, pBlock);
12350  }
12351  else
12352  {
12353  m_HasEmptyBlock = true;
12354  }
12355  }
12356  }
12357 }
12358 
12359 #if VMA_STATS_STRING_ENABLED
12360 
12361 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12362 {
12363  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12364 
12365  json.BeginObject();
12366 
12367  if(m_IsCustomPool)
12368  {
12369  json.WriteString("MemoryTypeIndex");
12370  json.WriteNumber(m_MemoryTypeIndex);
12371 
12372  json.WriteString("BlockSize");
12373  json.WriteNumber(m_PreferredBlockSize);
12374 
12375  json.WriteString("BlockCount");
12376  json.BeginObject(true);
12377  if(m_MinBlockCount > 0)
12378  {
12379  json.WriteString("Min");
12380  json.WriteNumber((uint64_t)m_MinBlockCount);
12381  }
12382  if(m_MaxBlockCount < SIZE_MAX)
12383  {
12384  json.WriteString("Max");
12385  json.WriteNumber((uint64_t)m_MaxBlockCount);
12386  }
12387  json.WriteString("Cur");
12388  json.WriteNumber((uint64_t)m_Blocks.size());
12389  json.EndObject();
12390 
12391  if(m_FrameInUseCount > 0)
12392  {
12393  json.WriteString("FrameInUseCount");
12394  json.WriteNumber(m_FrameInUseCount);
12395  }
12396 
12397  if(m_Algorithm != 0)
12398  {
12399  json.WriteString("Algorithm");
12400  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12401  }
12402  }
12403  else
12404  {
12405  json.WriteString("PreferredBlockSize");
12406  json.WriteNumber(m_PreferredBlockSize);
12407  }
12408 
12409  json.WriteString("Blocks");
12410  json.BeginObject();
12411  for(size_t i = 0; i < m_Blocks.size(); ++i)
12412  {
12413  json.BeginString();
12414  json.ContinueString(m_Blocks[i]->GetId());
12415  json.EndString();
12416 
12417  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12418  }
12419  json.EndObject();
12420 
12421  json.EndObject();
12422 }
12423 
12424 #endif // #if VMA_STATS_STRING_ENABLED
12425 
12426 void VmaBlockVector::Defragment(
12427  class VmaBlockVectorDefragmentationContext* pCtx,
12428  VmaDefragmentationStats* pStats,
12429  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12430  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12431  VkCommandBuffer commandBuffer)
12432 {
12433  pCtx->res = VK_SUCCESS;
12434 
12435  const VkMemoryPropertyFlags memPropFlags =
12436  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12437  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12438  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12439 
12440  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12441  isHostVisible;
12442  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12443  !IsCorruptionDetectionEnabled() &&
12444  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
12445 
12446  // There are options to defragment this memory type.
12447  if(canDefragmentOnCpu || canDefragmentOnGpu)
12448  {
12449  bool defragmentOnGpu;
12450  // Only one of the two options is available: use it.
12451  if(canDefragmentOnGpu != canDefragmentOnCpu)
12452  {
12453  defragmentOnGpu = canDefragmentOnGpu;
12454  }
12455  // Both options are available: use heuristics to choose the better one.
12456  else
12457  {
12458  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12459  m_hAllocator->IsIntegratedGpu();
12460  }
12461 
12462  bool overlappingMoveSupported = !defragmentOnGpu;
12463 
12464  if(m_hAllocator->m_UseMutex)
12465  {
12466  m_Mutex.LockWrite();
12467  pCtx->mutexLocked = true;
12468  }
12469 
12470  pCtx->Begin(overlappingMoveSupported);
12471 
12472  // Defragment.
12473 
12474  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12475  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12476  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12477  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12478  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12479 
12480  // Accumulate statistics.
12481  if(pStats != VMA_NULL)
12482  {
12483  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12484  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12485  pStats->bytesMoved += bytesMoved;
12486  pStats->allocationsMoved += allocationsMoved;
12487  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12488  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12489  if(defragmentOnGpu)
12490  {
12491  maxGpuBytesToMove -= bytesMoved;
12492  maxGpuAllocationsToMove -= allocationsMoved;
12493  }
12494  else
12495  {
12496  maxCpuBytesToMove -= bytesMoved;
12497  maxCpuAllocationsToMove -= allocationsMoved;
12498  }
12499  }
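// maxGpu*/maxCpu* are in-out parameters on purpose: what is consumed here
// shrinks the remaining budget for block vectors defragmented after this one.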
12500 
12501  if(pCtx->res >= VK_SUCCESS)
12502  {
12503  if(defragmentOnGpu)
12504  {
12505  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12506  }
12507  else
12508  {
12509  ApplyDefragmentationMovesCpu(pCtx, moves);
12510  }
12511  }
12512  }
12513 }
12514 
12515 void VmaBlockVector::DefragmentationEnd(
12516  class VmaBlockVectorDefragmentationContext* pCtx,
12517  VmaDefragmentationStats* pStats)
12518 {
12519  // Destroy buffers.
12520  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12521  {
12522  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12523  if(blockCtx.hBuffer)
12524  {
12525  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12526  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12527  }
12528  }
12529 
12530  if(pCtx->res >= VK_SUCCESS)
12531  {
12532  FreeEmptyBlocks(pStats);
12533  }
12534 
12535  if(pCtx->mutexLocked)
12536  {
12537  VMA_ASSERT(m_hAllocator->m_UseMutex);
12538  m_Mutex.UnlockWrite();
12539  }
12540 }
12541 
12542 size_t VmaBlockVector::CalcAllocationCount() const
12543 {
12544  size_t result = 0;
12545  for(size_t i = 0; i < m_Blocks.size(); ++i)
12546  {
12547  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12548  }
12549  return result;
12550 }
12551 
12552 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12553 {
12554  if(m_BufferImageGranularity == 1)
12555  {
12556  return false;
12557  }
12558  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12559  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12560  {
12561  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12562  VMA_ASSERT(m_Algorithm == 0);
12563  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12564  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12565  {
12566  return true;
12567  }
12568  }
12569  return false;
12570 }
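// (A "conflict" here means a linear (buffer) and a non-linear (optimal-tiling
// image) suballocation falling into the same bufferImageGranularity page,
// which the Vulkan spec requires to be kept apart by extra alignment.)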
12571 
12572 void VmaBlockVector::MakePoolAllocationsLost(
12573  uint32_t currentFrameIndex,
12574  size_t* pLostAllocationCount)
12575 {
12576  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12577  size_t lostAllocationCount = 0;
12578  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12579  {
12580  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12581  VMA_ASSERT(pBlock);
12582  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12583  }
12584  if(pLostAllocationCount != VMA_NULL)
12585  {
12586  *pLostAllocationCount = lostAllocationCount;
12587  }
12588 }
12589 
12590 VkResult VmaBlockVector::CheckCorruption()
12591 {
12592  if(!IsCorruptionDetectionEnabled())
12593  {
12594  return VK_ERROR_FEATURE_NOT_PRESENT;
12595  }
12596 
12597  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12598  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12599  {
12600  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12601  VMA_ASSERT(pBlock);
12602  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12603  if(res != VK_SUCCESS)
12604  {
12605  return res;
12606  }
12607  }
12608  return VK_SUCCESS;
12609 }
12610 
12611 void VmaBlockVector::AddStats(VmaStats* pStats)
12612 {
12613  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12614  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12615 
12616  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12617 
12618  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12619  {
12620  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12621  VMA_ASSERT(pBlock);
12622  VMA_HEAVY_ASSERT(pBlock->Validate());
12623  VmaStatInfo allocationStatInfo;
12624  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12625  VmaAddStatInfo(pStats->total, allocationStatInfo);
12626  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12627  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12628  }
12629 }
12630 
12631  ////////////////////////////////////////////////////////////////////////////////
12632  // VmaDefragmentationAlgorithm_Generic members definition
12633 
12634 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12635  VmaAllocator hAllocator,
12636  VmaBlockVector* pBlockVector,
12637  uint32_t currentFrameIndex,
12638  bool overlappingMoveSupported) :
12639  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12640  m_AllocationCount(0),
12641  m_AllAllocations(false),
12642  m_BytesMoved(0),
12643  m_AllocationsMoved(0),
12644  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12645 {
12646  // Create block info for each block.
12647  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12648  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12649  {
12650  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12651  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12652  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12653  m_Blocks.push_back(pBlockInfo);
12654  }
12655 
12656  // Sort them by m_pBlock pointer value.
12657  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12658 }
12659 
12660 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12661 {
12662  for(size_t i = m_Blocks.size(); i--; )
12663  {
12664  vma_delete(m_hAllocator, m_Blocks[i]);
12665  }
12666 }
12667 
12668 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12669 {
12670  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
12671  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12672  {
12673  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12674  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12675  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12676  {
12677  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12678  (*it)->m_Allocations.push_back(allocInfo);
12679  }
12680  else
12681  {
12682  VMA_ASSERT(0);
12683  }
12684 
12685  ++m_AllocationCount;
12686  }
12687 }
12688 
12689 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12690  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12691  VkDeviceSize maxBytesToMove,
12692  uint32_t maxAllocationsToMove)
12693 {
12694  if(m_Blocks.empty())
12695  {
12696  return VK_SUCCESS;
12697  }
12698 
12699  // This is a choice based on research.
12700  // Option 1:
12701  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12702  // Option 2:
12703  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12704  // Option 3:
12705  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12706 
12707  size_t srcBlockMinIndex = 0;
12708  // With FAST_ALGORITHM, only use the last of the blocks that contain non-movable allocations (and those after it) as source blocks.
12709  /*
12710  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12711  {
12712  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12713  if(blocksWithNonMovableCount > 0)
12714  {
12715  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12716  }
12717  }
12718  */
12719 
12720  size_t srcBlockIndex = m_Blocks.size() - 1;
12721  size_t srcAllocIndex = SIZE_MAX;
12722  for(;;)
12723  {
12724  // 1. Find next allocation to move.
12725  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12726  // 1.2. Then start from last to first m_Allocations.
12727  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12728  {
12729  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12730  {
12731  // Finished: no more allocations to process.
12732  if(srcBlockIndex == srcBlockMinIndex)
12733  {
12734  return VK_SUCCESS;
12735  }
12736  else
12737  {
12738  --srcBlockIndex;
12739  srcAllocIndex = SIZE_MAX;
12740  }
12741  }
12742  else
12743  {
12744  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12745  }
12746  }
12747 
12748  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12749  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12750 
12751  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12752  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12753  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12754  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12755 
12756  // 2. Try to find new place for this allocation in preceding or current block.
12757  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12758  {
12759  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12760  VmaAllocationRequest dstAllocRequest;
12761  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12762  m_CurrentFrameIndex,
12763  m_pBlockVector->GetFrameInUseCount(),
12764  m_pBlockVector->GetBufferImageGranularity(),
12765  size,
12766  alignment,
12767  false, // upperAddress
12768  suballocType,
12769  false, // canMakeOtherLost
12770  strategy,
12771  &dstAllocRequest) &&
12772  MoveMakesSense(
12773  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12774  {
12775  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12776 
12777  // Reached limit on number of allocations or bytes to move.
12778  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12779  (m_BytesMoved + size > maxBytesToMove))
12780  {
12781  return VK_SUCCESS;
12782  }
12783 
12784  VmaDefragmentationMove move;
12785  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12786  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12787  move.srcOffset = srcOffset;
12788  move.dstOffset = dstAllocRequest.offset;
12789  move.size = size;
12790  moves.push_back(move);
12791 
12792  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12793  dstAllocRequest,
12794  suballocType,
12795  size,
12796  allocInfo.m_hAllocation);
12797  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12798 
12799  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12800 
12801  if(allocInfo.m_pChanged != VMA_NULL)
12802  {
12803  *allocInfo.m_pChanged = VK_TRUE;
12804  }
12805 
12806  ++m_AllocationsMoved;
12807  m_BytesMoved += size;
12808 
12809  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12810 
12811  break;
12812  }
12813  }
12814 
12815  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for the next round.
12816 
12817  if(srcAllocIndex > 0)
12818  {
12819  --srcAllocIndex;
12820  }
12821  else
12822  {
12823  if(srcBlockIndex > 0)
12824  {
12825  --srcBlockIndex;
12826  srcAllocIndex = SIZE_MAX;
12827  }
12828  else
12829  {
12830  return VK_SUCCESS;
12831  }
12832  }
12833  }
12834 }
12835 
12836 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12837 {
12838  size_t result = 0;
12839  for(size_t i = 0; i < m_Blocks.size(); ++i)
12840  {
12841  if(m_Blocks[i]->m_HasNonMovableAllocations)
12842  {
12843  ++result;
12844  }
12845  }
12846  return result;
12847 }
12848 
12849 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12850  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12851  VkDeviceSize maxBytesToMove,
12852  uint32_t maxAllocationsToMove)
12853 {
12854  if(!m_AllAllocations && m_AllocationCount == 0)
12855  {
12856  return VK_SUCCESS;
12857  }
12858 
12859  const size_t blockCount = m_Blocks.size();
12860  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12861  {
12862  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12863 
12864  if(m_AllAllocations)
12865  {
12866  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12867  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12868  it != pMetadata->m_Suballocations.end();
12869  ++it)
12870  {
12871  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12872  {
12873  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12874  pBlockInfo->m_Allocations.push_back(allocInfo);
12875  }
12876  }
12877  }
12878 
12879  pBlockInfo->CalcHasNonMovableAllocations();
12880 
12881  // This is a choice based on research.
12882  // Option 1:
12883  pBlockInfo->SortAllocationsByOffsetDescending();
12884  // Option 2:
12885  //pBlockInfo->SortAllocationsBySizeDescending();
12886  }
12887 
12888  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12889  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12890 
12891  // This is a choice based on research.
12892  const uint32_t roundCount = 2;
12893 
12894  // Execute defragmentation rounds (the main part).
12895  VkResult result = VK_SUCCESS;
12896  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12897  {
12898  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12899  }
12900 
12901  return result;
12902 }
12903 
12904 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12905  size_t dstBlockIndex, VkDeviceSize dstOffset,
12906  size_t srcBlockIndex, VkDeviceSize srcOffset)
12907 {
12908  if(dstBlockIndex < srcBlockIndex)
12909  {
12910  return true;
12911  }
12912  if(dstBlockIndex > srcBlockIndex)
12913  {
12914  return false;
12915  }
12916  if(dstOffset < srcOffset)
12917  {
12918  return true;
12919  }
12920  return false;
12921 }
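// In other words: a move "makes sense" only if it takes the allocation
// strictly towards the front, comparing (blockIndex, offset) pairs
// lexicographically.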
12922 
12923  ////////////////////////////////////////////////////////////////////////////////
12924  // VmaDefragmentationAlgorithm_Fast
12925 
12926 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12927  VmaAllocator hAllocator,
12928  VmaBlockVector* pBlockVector,
12929  uint32_t currentFrameIndex,
12930  bool overlappingMoveSupported) :
12931  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12932  m_OverlappingMoveSupported(overlappingMoveSupported),
12933  m_AllocationCount(0),
12934  m_AllAllocations(false),
12935  m_BytesMoved(0),
12936  m_AllocationsMoved(0),
12937  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12938 {
12939  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12940 
12941 }
12942 
12943 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12944 {
12945 }
12946 
12947 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12948  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12949  VkDeviceSize maxBytesToMove,
12950  uint32_t maxAllocationsToMove)
12951 {
12952  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12953 
12954  const size_t blockCount = m_pBlockVector->GetBlockCount();
12955  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12956  {
12957  return VK_SUCCESS;
12958  }
12959 
12960  PreprocessMetadata();
12961 
12962  // Sort blocks in order from most "destination" to most "source".
12963 
12964  m_BlockInfos.resize(blockCount);
12965  for(size_t i = 0; i < blockCount; ++i)
12966  {
12967  m_BlockInfos[i].origBlockIndex = i;
12968  }
12969 
12970  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12971  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12972  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12973  });
12974 
12975  // THE MAIN ALGORITHM
12976 
12977  FreeSpaceDatabase freeSpaceDb;
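// freeSpaceDb collects the gaps this pass leaves behind (the tail of an
// exhausted destination block, or the hole in front of an allocation that is
// skipped over), so that later, smaller allocations can be back-filled into
// them via Fetch().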
12978 
12979  size_t dstBlockInfoIndex = 0;
12980  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12981  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12982  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12983  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12984  VkDeviceSize dstOffset = 0;
12985 
12986  bool end = false;
12987  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12988  {
12989  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12990  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12991  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12992  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12993  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12994  {
12995  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12996  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12997  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12998  if(m_AllocationsMoved == maxAllocationsToMove ||
12999  m_BytesMoved + srcAllocSize > maxBytesToMove)
13000  {
13001  end = true;
13002  break;
13003  }
13004  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
13005 
13006  // Try to place it in one of free spaces from the database.
13007  size_t freeSpaceInfoIndex;
13008  VkDeviceSize dstAllocOffset;
13009  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
13010  freeSpaceInfoIndex, dstAllocOffset))
13011  {
13012  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13013  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13014  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13015 
13016  // Same block
13017  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13018  {
13019  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13020 
13021  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13022 
13023  VmaSuballocation suballoc = *srcSuballocIt;
13024  suballoc.offset = dstAllocOffset;
13025  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13026  m_BytesMoved += srcAllocSize;
13027  ++m_AllocationsMoved;
13028 
13029  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13030  ++nextSuballocIt;
13031  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13032  srcSuballocIt = nextSuballocIt;
13033 
13034  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13035 
13036  VmaDefragmentationMove move = {
13037  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13038  srcAllocOffset, dstAllocOffset,
13039  srcAllocSize };
13040  moves.push_back(move);
13041  }
13042  // Different block
13043  else
13044  {
13045  // MOVE OPTION 2: Move the allocation to a different block.
13046 
13047  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13048 
13049  VmaSuballocation suballoc = *srcSuballocIt;
13050  suballoc.offset = dstAllocOffset;
13051  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13052  m_BytesMoved += srcAllocSize;
13053  ++m_AllocationsMoved;
13054 
13055  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13056  ++nextSuballocIt;
13057  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13058  srcSuballocIt = nextSuballocIt;
13059 
13060  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13061 
13062  VmaDefragmentationMove move = {
13063  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13064  srcAllocOffset, dstAllocOffset,
13065  srcAllocSize };
13066  moves.push_back(move);
13067  }
13068  }
13069  else
13070  {
13071  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13072 
13073  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
13074  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13075  dstAllocOffset + srcAllocSize > dstBlockSize)
13076  {
13077  // But before that, register remaining free space at the end of dst block.
13078  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13079 
13080  ++dstBlockInfoIndex;
13081  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13082  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13083  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13084  dstBlockSize = pDstMetadata->GetSize();
13085  dstOffset = 0;
13086  dstAllocOffset = 0;
13087  }
13088 
13089  // Same block
13090  if(dstBlockInfoIndex == srcBlockInfoIndex)
13091  {
13092  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13093 
13094  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13095 
13096  bool skipOver = overlap;
13097  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13098  {
13099  // If the destination and source places overlap, skip the move if it would
13100  // shift the allocation by less than 1/64 of its size.
13101  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13102  }
13103 
13104  if(skipOver)
13105  {
13106  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13107 
13108  dstOffset = srcAllocOffset + srcAllocSize;
13109  ++srcSuballocIt;
13110  }
13111  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13112  else
13113  {
13114  srcSuballocIt->offset = dstAllocOffset;
13115  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13116  dstOffset = dstAllocOffset + srcAllocSize;
13117  m_BytesMoved += srcAllocSize;
13118  ++m_AllocationsMoved;
13119  ++srcSuballocIt;
13120  VmaDefragmentationMove move = {
13121  srcOrigBlockIndex, dstOrigBlockIndex,
13122  srcAllocOffset, dstAllocOffset,
13123  srcAllocSize };
13124  moves.push_back(move);
13125  }
13126  }
13127  // Different block
13128  else
13129  {
13130  // MOVE OPTION 2: Move the allocation to a different block.
13131 
13132  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13133  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13134 
13135  VmaSuballocation suballoc = *srcSuballocIt;
13136  suballoc.offset = dstAllocOffset;
13137  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13138  dstOffset = dstAllocOffset + srcAllocSize;
13139  m_BytesMoved += srcAllocSize;
13140  ++m_AllocationsMoved;
13141 
13142  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13143  ++nextSuballocIt;
13144  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13145  srcSuballocIt = nextSuballocIt;
13146 
13147  pDstMetadata->m_Suballocations.push_back(suballoc);
13148 
13149  VmaDefragmentationMove move = {
13150  srcOrigBlockIndex, dstOrigBlockIndex,
13151  srcAllocOffset, dstAllocOffset,
13152  srcAllocSize };
13153  moves.push_back(move);
13154  }
13155  }
13156  }
13157  }
13158 
13159  m_BlockInfos.clear();
13160 
13161  PostprocessMetadata();
13162 
13163  return VK_SUCCESS;
13164 }
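/*
Note on the compaction loop above: allocations are processed front to back.
Each one is first tested against free spaces previously registered in
freeSpaceDb (gaps left behind by skipped allocations); failing that, it is
packed at the running dstOffset, either within its own block by decreasing
its offset (MOVE OPTION 1) or into an earlier block (MOVE OPTION 2). Every
relocation is appended to `moves` as a VmaDefragmentationMove so the caller
can perform the actual data copies.
*/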
13165 
13166 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13167 {
13168  const size_t blockCount = m_pBlockVector->GetBlockCount();
13169  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13170  {
13171  VmaBlockMetadata_Generic* const pMetadata =
13172  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13173  pMetadata->m_FreeCount = 0;
13174  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13175  pMetadata->m_FreeSuballocationsBySize.clear();
13176  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13177  it != pMetadata->m_Suballocations.end(); )
13178  {
13179  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13180  {
13181  VmaSuballocationList::iterator nextIt = it;
13182  ++nextIt;
13183  pMetadata->m_Suballocations.erase(it);
13184  it = nextIt;
13185  }
13186  else
13187  {
13188  ++it;
13189  }
13190  }
13191  }
13192 }
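/*
PreprocessMetadata() above erases all VMA_SUBALLOCATION_TYPE_FREE entries and
resets the free-space statistics, so the compaction pass iterates over real
allocations only; PostprocessMetadata() below rebuilds the free entries and
statistics from the final offsets.
*/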
13193 
13194 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13195 {
13196  const size_t blockCount = m_pBlockVector->GetBlockCount();
13197  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13198  {
13199  VmaBlockMetadata_Generic* const pMetadata =
13200  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13201  const VkDeviceSize blockSize = pMetadata->GetSize();
13202 
13203  // No allocations in this block - entire area is free.
13204  if(pMetadata->m_Suballocations.empty())
13205  {
13206  pMetadata->m_FreeCount = 1;
13207  //pMetadata->m_SumFreeSize is already set to blockSize.
13208  VmaSuballocation suballoc = {
13209  0, // offset
13210  blockSize, // size
13211  VMA_NULL, // hAllocation
13212  VMA_SUBALLOCATION_TYPE_FREE };
13213  pMetadata->m_Suballocations.push_back(suballoc);
13214  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13215  }
13216  // There are some allocations in this block.
13217  else
13218  {
13219  VkDeviceSize offset = 0;
13220  VmaSuballocationList::iterator it;
13221  for(it = pMetadata->m_Suballocations.begin();
13222  it != pMetadata->m_Suballocations.end();
13223  ++it)
13224  {
13225  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13226  VMA_ASSERT(it->offset >= offset);
13227 
13228  // Need to insert preceding free space.
13229  if(it->offset > offset)
13230  {
13231  ++pMetadata->m_FreeCount;
13232  const VkDeviceSize freeSize = it->offset - offset;
13233  VmaSuballocation suballoc = {
13234  offset, // offset
13235  freeSize, // size
13236  VMA_NULL, // hAllocation
13237  VMA_SUBALLOCATION_TYPE_FREE };
13238  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13239  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13240  {
13241  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13242  }
13243  }
13244 
13245  pMetadata->m_SumFreeSize -= it->size;
13246  offset = it->offset + it->size;
13247  }
13248 
13249  // Need to insert trailing free space.
13250  if(offset < blockSize)
13251  {
13252  ++pMetadata->m_FreeCount;
13253  const VkDeviceSize freeSize = blockSize - offset;
13254  VmaSuballocation suballoc = {
13255  offset, // offset
13256  freeSize, // size
13257  VMA_NULL, // hAllocation
13258  VMA_SUBALLOCATION_TYPE_FREE };
13259  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13260  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13261  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13262  {
13263  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13264  }
13265  }
13266 
13267  VMA_SORT(
13268  pMetadata->m_FreeSuballocationsBySize.begin(),
13269  pMetadata->m_FreeSuballocationsBySize.end(),
13270  VmaSuballocationItemSizeLess());
13271  }
13272 
13273  VMA_HEAVY_ASSERT(pMetadata->Validate());
13274  }
13275 }
13276 
13277 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13278 {
13279  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13280  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13281  // Advance to the first suballocation at or after suballoc.offset.
13282  while(it != pMetadata->m_Suballocations.end() &&
13283  it->offset < suballoc.offset)
13284  {
13285  ++it;
13286  }
13288  pMetadata->m_Suballocations.insert(it, suballoc);
13289 }
13290 
13291 ////////////////////////////////////////////////////////////////////////////////
13292 // VmaBlockVectorDefragmentationContext
13293 
13294 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13295  VmaAllocator hAllocator,
13296  VmaPool hCustomPool,
13297  VmaBlockVector* pBlockVector,
13298  uint32_t currFrameIndex,
13299  uint32_t algorithmFlags) :
13300  res(VK_SUCCESS),
13301  mutexLocked(false),
13302  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13303  m_hAllocator(hAllocator),
13304  m_hCustomPool(hCustomPool),
13305  m_pBlockVector(pBlockVector),
13306  m_CurrFrameIndex(currFrameIndex),
13307  m_AlgorithmFlags(algorithmFlags),
13308  m_pAlgorithm(VMA_NULL),
13309  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13310  m_AllAllocations(false)
13311 {
13312 }
13313 
13314 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13315 {
13316  vma_delete(m_hAllocator, m_pAlgorithm);
13317 }
13318 
13319 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13320 {
13321  AllocInfo info = { hAlloc, pChanged };
13322  m_Allocations.push_back(info);
13323 }
13324 
13325 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13326 {
13327  const bool allAllocations = m_AllAllocations ||
13328  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13329 
13330  /********************************
13331  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13332  ********************************/
13333 
13334  /*
13335  Fast algorithm is supported only when certain criteria are met:
13336  - VMA_DEBUG_MARGIN is 0.
13337  - All allocations in this block vector are moveable.
13338  - There is no possibility of image/buffer granularity conflict.
13339  */
13340  if(VMA_DEBUG_MARGIN == 0 &&
13341  allAllocations &&
13342  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13343  {
13344  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13345  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13346  }
13347  else
13348  {
13349  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13350  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13351  }
13352 
13353  if(allAllocations)
13354  {
13355  m_pAlgorithm->AddAll();
13356  }
13357  else
13358  {
13359  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13360  {
13361  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13362  }
13363  }
13364 }
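/*
The fast algorithm rewrites block metadata wholesale via Preprocess-/
PostprocessMetadata(), so it is only chosen when every allocation in the
block vector participates, no VMA_DEBUG_MARGIN has to be preserved between
allocations, and no buffer/image granularity conflict can arise; in all other
cases the generic, per-allocation algorithm is used.
*/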
13365 
13366 ////////////////////////////////////////////////////////////////////////////////
13367 // VmaDefragmentationContext
13368 
13369 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13370  VmaAllocator hAllocator,
13371  uint32_t currFrameIndex,
13372  uint32_t flags,
13373  VmaDefragmentationStats* pStats) :
13374  m_hAllocator(hAllocator),
13375  m_CurrFrameIndex(currFrameIndex),
13376  m_Flags(flags),
13377  m_pStats(pStats),
13378  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13379 {
13380  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13381 }
13382 
13383 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13384 {
13385  for(size_t i = m_CustomPoolContexts.size(); i--; )
13386  {
13387  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13388  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13389  vma_delete(m_hAllocator, pBlockVectorCtx);
13390  }
13391  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13392  {
13393  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13394  if(pBlockVectorCtx)
13395  {
13396  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13397  vma_delete(m_hAllocator, pBlockVectorCtx);
13398  }
13399  }
13400 }
13401 
13402 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13403 {
13404  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13405  {
13406  VmaPool pool = pPools[poolIndex];
13407  VMA_ASSERT(pool);
13408  // Pools with algorithm other than default are not defragmented.
13409  if(pool->m_BlockVector.GetAlgorithm() == 0)
13410  {
13411  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13412 
13413  for(size_t i = m_CustomPoolContexts.size(); i--; )
13414  {
13415  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13416  {
13417  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13418  break;
13419  }
13420  }
13421 
13422  if(!pBlockVectorDefragCtx)
13423  {
13424  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13425  m_hAllocator,
13426  pool,
13427  &pool->m_BlockVector,
13428  m_CurrFrameIndex,
13429  m_Flags);
13430  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13431  }
13432 
13433  pBlockVectorDefragCtx->AddAll();
13434  }
13435  }
13436 }
13437 
13438 void VmaDefragmentationContext_T::AddAllocations(
13439  uint32_t allocationCount,
13440  VmaAllocation* pAllocations,
13441  VkBool32* pAllocationsChanged)
13442 {
13443  // Dispatch pAllocations among the per-pool defragmentation contexts. Create them when necessary.
13444  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13445  {
13446  const VmaAllocation hAlloc = pAllocations[allocIndex];
13447  VMA_ASSERT(hAlloc);
13448  // DedicatedAlloc cannot be defragmented.
13449  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13450  // Lost allocation cannot be defragmented.
13451  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13452  {
13453  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13454 
13455  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13456  // This allocation belongs to custom pool.
13457  if(hAllocPool != VK_NULL_HANDLE)
13458  {
13459  // Pools with algorithm other than default are not defragmented.
13460  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13461  {
13462  for(size_t i = m_CustomPoolContexts.size(); i--; )
13463  {
13464  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13465  {
13466  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13467  break;
13468  }
13469  }
13470  if(!pBlockVectorDefragCtx)
13471  {
13472  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13473  m_hAllocator,
13474  hAllocPool,
13475  &hAllocPool->m_BlockVector,
13476  m_CurrFrameIndex,
13477  m_Flags);
13478  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13479  }
13480  }
13481  }
13482  // This allocation belongs to default pool.
13483  else
13484  {
13485  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13486  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13487  if(!pBlockVectorDefragCtx)
13488  {
13489  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13490  m_hAllocator,
13491  VMA_NULL, // hCustomPool
13492  m_hAllocator->m_pBlockVectors[memTypeIndex],
13493  m_CurrFrameIndex,
13494  m_Flags);
13495  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13496  }
13497  }
13498 
13499  if(pBlockVectorDefragCtx)
13500  {
13501  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13502  &pAllocationsChanged[allocIndex] : VMA_NULL;
13503  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13504  }
13505  }
13506  }
13507 }
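/*
AddAllocations() routes each block-bound, non-lost allocation to the context
of its owning block vector: a lazily created, reused per-pool context for
custom pools, or the per-memory-type entry in m_DefaultPoolContexts for the
default pools. Dedicated and lost allocations are skipped.
*/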
13508 
13509 VkResult VmaDefragmentationContext_T::Defragment(
13510  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13511  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13512  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13513 {
13514  if(pStats)
13515  {
13516  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13517  }
13518 
13519  if(commandBuffer == VK_NULL_HANDLE)
13520  {
13521  maxGpuBytesToMove = 0;
13522  maxGpuAllocationsToMove = 0;
13523  }
13524 
13525  VkResult res = VK_SUCCESS;
13526 
13527  // Process default pools.
13528  for(uint32_t memTypeIndex = 0;
13529  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13530  ++memTypeIndex)
13531  {
13532  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13533  if(pBlockVectorCtx)
13534  {
13535  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13536  pBlockVectorCtx->GetBlockVector()->Defragment(
13537  pBlockVectorCtx,
13538  pStats,
13539  maxCpuBytesToMove, maxCpuAllocationsToMove,
13540  maxGpuBytesToMove, maxGpuAllocationsToMove,
13541  commandBuffer);
13542  if(pBlockVectorCtx->res != VK_SUCCESS)
13543  {
13544  res = pBlockVectorCtx->res;
13545  }
13546  }
13547  }
13548 
13549  // Process custom pools.
13550  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13551  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13552  ++customCtxIndex)
13553  {
13554  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13555  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13556  pBlockVectorCtx->GetBlockVector()->Defragment(
13557  pBlockVectorCtx,
13558  pStats,
13559  maxCpuBytesToMove, maxCpuAllocationsToMove,
13560  maxGpuBytesToMove, maxGpuAllocationsToMove,
13561  commandBuffer);
13562  if(pBlockVectorCtx->res != VK_SUCCESS)
13563  {
13564  res = pBlockVectorCtx->res;
13565  }
13566  }
13567 
13568  return res;
13569 }
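/*
For illustration, a minimal sketch of driving this path through the public
API (allocator, allocations, allocCount, and allocationsChanged are assumed
application-side variables; error handling omitted):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.pAllocationsChanged = allocationsChanged;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    defragInfo.commandBuffer = VK_NULL_HANDLE; // CPU-side moves only

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);

With commandBuffer == VK_NULL_HANDLE, the GPU limits are zeroed above, so
only allocations that can be moved on the CPU side are defragmented.
*/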
13570 
13571 ////////////////////////////////////////////////////////////////////////////////
13572 // VmaRecorder
13573 
13574 #if VMA_RECORDING_ENABLED
13575 
13576 VmaRecorder::VmaRecorder() :
13577  m_UseMutex(true),
13578  m_Flags(0),
13579  m_File(VMA_NULL),
13580  m_Freq(INT64_MAX),
13581  m_StartCounter(INT64_MAX)
13582 {
13583 }
13584 
13585 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13586 {
13587  m_UseMutex = useMutex;
13588  m_Flags = settings.flags;
13589 
13590  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13591  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13592 
13593  // Open file for writing.
13594  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13595  if(err != 0)
13596  {
13597  return VK_ERROR_INITIALIZATION_FAILED;
13598  }
13599 
13600  // Write header.
13601  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13602  fprintf(m_File, "%s\n", "1,5");
13603 
13604  return VK_SUCCESS;
13605 }
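/*
The recording file is comma-separated text: after the two header lines
written above (the file signature and the format version "1,5"), each
Record* method below appends one line of the form
threadId,time,frameIndex,functionName,<function-specific arguments>.
*/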
13606 
13607 VmaRecorder::~VmaRecorder()
13608 {
13609  if(m_File != VMA_NULL)
13610  {
13611  fclose(m_File);
13612  }
13613 }
13614 
13615 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13616 {
13617  CallParams callParams;
13618  GetBasicParams(callParams);
13619 
13620  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13621  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13622  Flush();
13623 }
13624 
13625 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13626 {
13627  CallParams callParams;
13628  GetBasicParams(callParams);
13629 
13630  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13631  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13632  Flush();
13633 }
13634 
13635 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13636 {
13637  CallParams callParams;
13638  GetBasicParams(callParams);
13639 
13640  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13641  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13642  createInfo.memoryTypeIndex,
13643  createInfo.flags,
13644  createInfo.blockSize,
13645  (uint64_t)createInfo.minBlockCount,
13646  (uint64_t)createInfo.maxBlockCount,
13647  createInfo.frameInUseCount,
13648  pool);
13649  Flush();
13650 }
13651 
13652 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13653 {
13654  CallParams callParams;
13655  GetBasicParams(callParams);
13656 
13657  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13658  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13659  pool);
13660  Flush();
13661 }
13662 
13663 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13664  const VkMemoryRequirements& vkMemReq,
13665  const VmaAllocationCreateInfo& createInfo,
13666  VmaAllocation allocation)
13667 {
13668  CallParams callParams;
13669  GetBasicParams(callParams);
13670 
13671  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13672  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13673  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13674  vkMemReq.size,
13675  vkMemReq.alignment,
13676  vkMemReq.memoryTypeBits,
13677  createInfo.flags,
13678  createInfo.usage,
13679  createInfo.requiredFlags,
13680  createInfo.preferredFlags,
13681  createInfo.memoryTypeBits,
13682  createInfo.pool,
13683  allocation,
13684  userDataStr.GetString());
13685  Flush();
13686 }
13687 
13688 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13689  const VkMemoryRequirements& vkMemReq,
13690  const VmaAllocationCreateInfo& createInfo,
13691  uint64_t allocationCount,
13692  const VmaAllocation* pAllocations)
13693 {
13694  CallParams callParams;
13695  GetBasicParams(callParams);
13696 
13697  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13698  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13699  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13700  vkMemReq.size,
13701  vkMemReq.alignment,
13702  vkMemReq.memoryTypeBits,
13703  createInfo.flags,
13704  createInfo.usage,
13705  createInfo.requiredFlags,
13706  createInfo.preferredFlags,
13707  createInfo.memoryTypeBits,
13708  createInfo.pool);
13709  PrintPointerList(allocationCount, pAllocations);
13710  fprintf(m_File, ",%s\n", userDataStr.GetString());
13711  Flush();
13712 }
13713 
13714 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13715  const VkMemoryRequirements& vkMemReq,
13716  bool requiresDedicatedAllocation,
13717  bool prefersDedicatedAllocation,
13718  const VmaAllocationCreateInfo& createInfo,
13719  VmaAllocation allocation)
13720 {
13721  CallParams callParams;
13722  GetBasicParams(callParams);
13723 
13724  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13725  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13726  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13727  vkMemReq.size,
13728  vkMemReq.alignment,
13729  vkMemReq.memoryTypeBits,
13730  requiresDedicatedAllocation ? 1 : 0,
13731  prefersDedicatedAllocation ? 1 : 0,
13732  createInfo.flags,
13733  createInfo.usage,
13734  createInfo.requiredFlags,
13735  createInfo.preferredFlags,
13736  createInfo.memoryTypeBits,
13737  createInfo.pool,
13738  allocation,
13739  userDataStr.GetString());
13740  Flush();
13741 }
13742 
13743 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13744  const VkMemoryRequirements& vkMemReq,
13745  bool requiresDedicatedAllocation,
13746  bool prefersDedicatedAllocation,
13747  const VmaAllocationCreateInfo& createInfo,
13748  VmaAllocation allocation)
13749 {
13750  CallParams callParams;
13751  GetBasicParams(callParams);
13752 
13753  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13754  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13755  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13756  vkMemReq.size,
13757  vkMemReq.alignment,
13758  vkMemReq.memoryTypeBits,
13759  requiresDedicatedAllocation ? 1 : 0,
13760  prefersDedicatedAllocation ? 1 : 0,
13761  createInfo.flags,
13762  createInfo.usage,
13763  createInfo.requiredFlags,
13764  createInfo.preferredFlags,
13765  createInfo.memoryTypeBits,
13766  createInfo.pool,
13767  allocation,
13768  userDataStr.GetString());
13769  Flush();
13770 }
13771 
13772 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13773  VmaAllocation allocation)
13774 {
13775  CallParams callParams;
13776  GetBasicParams(callParams);
13777 
13778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13779  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13780  allocation);
13781  Flush();
13782 }
13783 
13784 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13785  uint64_t allocationCount,
13786  const VmaAllocation* pAllocations)
13787 {
13788  CallParams callParams;
13789  GetBasicParams(callParams);
13790 
13791  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13792  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13793  PrintPointerList(allocationCount, pAllocations);
13794  fprintf(m_File, "\n");
13795  Flush();
13796 }
13797 
13798 void VmaRecorder::RecordResizeAllocation(
13799  uint32_t frameIndex,
13800  VmaAllocation allocation,
13801  VkDeviceSize newSize)
13802 {
13803  CallParams callParams;
13804  GetBasicParams(callParams);
13805 
13806  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13807  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13808  allocation, newSize);
13809  Flush();
13810 }
13811 
13812 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13813  VmaAllocation allocation,
13814  const void* pUserData)
13815 {
13816  CallParams callParams;
13817  GetBasicParams(callParams);
13818 
13819  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13820  UserDataString userDataStr(
13821  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13822  pUserData);
13823  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13824  allocation,
13825  userDataStr.GetString());
13826  Flush();
13827 }
13828 
13829 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13830  VmaAllocation allocation)
13831 {
13832  CallParams callParams;
13833  GetBasicParams(callParams);
13834 
13835  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13836  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13837  allocation);
13838  Flush();
13839 }
13840 
13841 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13842  VmaAllocation allocation)
13843 {
13844  CallParams callParams;
13845  GetBasicParams(callParams);
13846 
13847  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13848  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13849  allocation);
13850  Flush();
13851 }
13852 
13853 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13854  VmaAllocation allocation)
13855 {
13856  CallParams callParams;
13857  GetBasicParams(callParams);
13858 
13859  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13860  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13861  allocation);
13862  Flush();
13863 }
13864 
13865 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13866  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13867 {
13868  CallParams callParams;
13869  GetBasicParams(callParams);
13870 
13871  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13872  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13873  allocation,
13874  offset,
13875  size);
13876  Flush();
13877 }
13878 
13879 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13880  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13881 {
13882  CallParams callParams;
13883  GetBasicParams(callParams);
13884 
13885  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13886  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13887  allocation,
13888  offset,
13889  size);
13890  Flush();
13891 }
13892 
13893 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13894  const VkBufferCreateInfo& bufCreateInfo,
13895  const VmaAllocationCreateInfo& allocCreateInfo,
13896  VmaAllocation allocation)
13897 {
13898  CallParams callParams;
13899  GetBasicParams(callParams);
13900 
13901  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13902  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13903  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13904  bufCreateInfo.flags,
13905  bufCreateInfo.size,
13906  bufCreateInfo.usage,
13907  bufCreateInfo.sharingMode,
13908  allocCreateInfo.flags,
13909  allocCreateInfo.usage,
13910  allocCreateInfo.requiredFlags,
13911  allocCreateInfo.preferredFlags,
13912  allocCreateInfo.memoryTypeBits,
13913  allocCreateInfo.pool,
13914  allocation,
13915  userDataStr.GetString());
13916  Flush();
13917 }
13918 
13919 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13920  const VkImageCreateInfo& imageCreateInfo,
13921  const VmaAllocationCreateInfo& allocCreateInfo,
13922  VmaAllocation allocation)
13923 {
13924  CallParams callParams;
13925  GetBasicParams(callParams);
13926 
13927  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13928  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13929  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13930  imageCreateInfo.flags,
13931  imageCreateInfo.imageType,
13932  imageCreateInfo.format,
13933  imageCreateInfo.extent.width,
13934  imageCreateInfo.extent.height,
13935  imageCreateInfo.extent.depth,
13936  imageCreateInfo.mipLevels,
13937  imageCreateInfo.arrayLayers,
13938  imageCreateInfo.samples,
13939  imageCreateInfo.tiling,
13940  imageCreateInfo.usage,
13941  imageCreateInfo.sharingMode,
13942  imageCreateInfo.initialLayout,
13943  allocCreateInfo.flags,
13944  allocCreateInfo.usage,
13945  allocCreateInfo.requiredFlags,
13946  allocCreateInfo.preferredFlags,
13947  allocCreateInfo.memoryTypeBits,
13948  allocCreateInfo.pool,
13949  allocation,
13950  userDataStr.GetString());
13951  Flush();
13952 }
13953 
13954 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13955  VmaAllocation allocation)
13956 {
13957  CallParams callParams;
13958  GetBasicParams(callParams);
13959 
13960  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13961  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13962  allocation);
13963  Flush();
13964 }
13965 
13966 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13967  VmaAllocation allocation)
13968 {
13969  CallParams callParams;
13970  GetBasicParams(callParams);
13971 
13972  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13973  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13974  allocation);
13975  Flush();
13976 }
13977 
13978 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13979  VmaAllocation allocation)
13980 {
13981  CallParams callParams;
13982  GetBasicParams(callParams);
13983 
13984  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13985  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13986  allocation);
13987  Flush();
13988 }
13989 
13990 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13991  VmaAllocation allocation)
13992 {
13993  CallParams callParams;
13994  GetBasicParams(callParams);
13995 
13996  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13997  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13998  allocation);
13999  Flush();
14000 }
14001 
14002 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14003  VmaPool pool)
14004 {
14005  CallParams callParams;
14006  GetBasicParams(callParams);
14007 
14008  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14009  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14010  pool);
14011  Flush();
14012 }
14013 
14014 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14015  const VmaDefragmentationInfo2& info,
14016  VmaDefragmentationContext ctx)
14017 {
14018  CallParams callParams;
14019  GetBasicParams(callParams);
14020 
14021  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14022  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14023  info.flags);
14024  PrintPointerList(info.allocationCount, info.pAllocations);
14025  fprintf(m_File, ",");
14026  PrintPointerList(info.poolCount, info.pPools);
14027  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14028  info.maxCpuBytesToMove,
14029  info.maxCpuAllocationsToMove,
14030  info.maxGpuBytesToMove,
14031  info.maxGpuAllocationsToMove,
14032  info.commandBuffer,
14033  ctx);
14034  Flush();
14035 }
14036 
14037 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14038  VmaDefragmentationContext ctx)
14039 {
14040  CallParams callParams;
14041  GetBasicParams(callParams);
14042 
14043  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14044  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14045  ctx);
14046  Flush();
14047 }
14048 
14049 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14050 {
14051  if(pUserData != VMA_NULL)
14052  {
14053  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14054  {
14055  m_Str = (const char*)pUserData;
14056  }
14057  else
14058  {
14059  sprintf_s(m_PtrStr, "%p", pUserData);
14060  m_Str = m_PtrStr;
14061  }
14062  }
14063  else
14064  {
14065  m_Str = "";
14066  }
14067 }
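/*
UserDataString renders the pUserData associated with a call: as the actual
string when VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is set, and as a
"%p"-formatted pointer value otherwise.
*/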
14068 
14069 void VmaRecorder::WriteConfiguration(
14070  const VkPhysicalDeviceProperties& devProps,
14071  const VkPhysicalDeviceMemoryProperties& memProps,
14072  bool dedicatedAllocationExtensionEnabled)
14073 {
14074  fprintf(m_File, "Config,Begin\n");
14075 
14076  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14077  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14078  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14079  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14080  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14081  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14082 
14083  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14084  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14085  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14086 
14087  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14088  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14089  {
14090  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14091  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14092  }
14093  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14094  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14095  {
14096  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14097  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14098  }
14099 
14100  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14101 
14102  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14103  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14104  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14105  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14106  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14107  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14108  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14109  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14110  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14111 
14112  fprintf(m_File, "Config,End\n");
14113 }
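/*
Illustrative excerpt of the configuration section emitted above (values are
made up for the example):

    Config,Begin
    PhysicalDevice,apiVersion,4198400
    PhysicalDeviceLimits,bufferImageGranularity,1024
    PhysicalDeviceMemory,HeapCount,2
    Config,End
*/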
14114 
14115 void VmaRecorder::GetBasicParams(CallParams& outParams)
14116 {
14117  outParams.threadId = GetCurrentThreadId();
14118 
14119  LARGE_INTEGER counter;
14120  QueryPerformanceCounter(&counter);
14121  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14122 }
14123 
14124 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14125 {
14126  if(count)
14127  {
14128  fprintf(m_File, "%p", pItems[0]);
14129  for(uint64_t i = 1; i < count; ++i)
14130  {
14131  fprintf(m_File, " %p", pItems[i]);
14132  }
14133  }
14134 }
14135 
14136 void VmaRecorder::Flush()
14137 {
14138  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14139  {
14140  fflush(m_File);
14141  }
14142 }
14143 
14144 #endif // #if VMA_RECORDING_ENABLED
14145 
14146 ////////////////////////////////////////////////////////////////////////////////
14147 // VmaAllocationObjectAllocator
14148 
14149 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14150  m_Allocator(pAllocationCallbacks, 1024)
14151 {
14152 }
14153 
14154 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14155 {
14156  VmaMutexLock mutexLock(m_Mutex);
14157  return m_Allocator.Alloc();
14158 }
14159 
14160 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14161 {
14162  VmaMutexLock mutexLock(m_Mutex);
14163  m_Allocator.Free(hAlloc);
14164 }
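/*
VmaAllocationObjectAllocator is a mutex-guarded pool allocator for
VmaAllocation_T objects; the constant 1024 above sets the capacity of the
underlying VmaPoolAllocator's internal blocks, amortizing heap traffic when
allocations are created and destroyed frequently.
*/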
14165 
14166 ////////////////////////////////////////////////////////////////////////////////
14167 // VmaAllocator_T
14168 
14169 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14170  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14171  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14172  m_hDevice(pCreateInfo->device),
14173  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14174  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14175  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14176  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14177  m_PreferredLargeHeapBlockSize(0),
14178  m_PhysicalDevice(pCreateInfo->physicalDevice),
14179  m_CurrentFrameIndex(0),
14180  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14181  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14182  m_NextPoolId(0)
14183 #if VMA_RECORDING_ENABLED
14184  ,m_pRecorder(VMA_NULL)
14185 #endif
14186 {
14187  if(VMA_DEBUG_DETECT_CORRUPTION)
14188  {
14189  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14190  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14191  }
14192 
14193  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14194 
14195 #if !(VMA_DEDICATED_ALLOCATION)
14196  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14197  {
14198  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14199  }
14200 #endif
14201 
14202  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14203  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14204  memset(&m_MemProps, 0, sizeof(m_MemProps));
14205 
14206  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14207  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14208 
14209  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14210  {
14211  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14212  }
14213 
14214  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14215  {
14216  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14217  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14218  }
14219 
14220  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14221 
14222  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14223  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14224 
14225  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14226  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14227  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14228  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14229 
14230  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14231  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14232 
14233  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14234  {
14235  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14236  {
14237  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14238  if(limit != VK_WHOLE_SIZE)
14239  {
14240  m_HeapSizeLimit[heapIndex] = limit;
14241  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14242  {
14243  m_MemProps.memoryHeaps[heapIndex].size = limit;
14244  }
14245  }
14246  }
14247  }
14248 
14249  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14250  {
14251  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14252 
14253  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14254  this,
14255  VK_NULL_HANDLE, // hParentPool
14256  memTypeIndex,
14257  preferredBlockSize,
14258  0,
14259  SIZE_MAX,
14260  GetBufferImageGranularity(),
14261  pCreateInfo->frameInUseCount,
14262  false, // isCustomPool
14263  false, // explicitBlockSize
14264  false); // linearAlgorithm
14265  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14266  // because minBlockCount is 0.
14267  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14268 
14269  }
14270 }
14271 
14272 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14273 {
14274  VkResult res = VK_SUCCESS;
14275 
14276  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14277  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14278  {
14279 #if VMA_RECORDING_ENABLED
14280  m_pRecorder = vma_new(this, VmaRecorder)();
14281  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14282  if(res != VK_SUCCESS)
14283  {
14284  return res;
14285  }
14286  m_pRecorder->WriteConfiguration(
14287  m_PhysicalDeviceProperties,
14288  m_MemProps,
14289  m_UseKhrDedicatedAllocation);
14290  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14291 #else
14292  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14293  return VK_ERROR_FEATURE_NOT_PRESENT;
14294 #endif
14295  }
14296 
14297  return res;
14298 }
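/*
Recording is therefore opt-in twice: pRecordSettings must be provided with a
non-empty file path, and the implementation must be compiled with
VMA_RECORDING_ENABLED defined to 1; otherwise Init() fails with
VK_ERROR_FEATURE_NOT_PRESENT.
*/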
14299 
14300 VmaAllocator_T::~VmaAllocator_T()
14301 {
14302 #if VMA_RECORDING_ENABLED
14303  if(m_pRecorder != VMA_NULL)
14304  {
14305  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14306  vma_delete(this, m_pRecorder);
14307  }
14308 #endif
14309 
14310  VMA_ASSERT(m_Pools.empty());
14311 
14312  for(size_t i = GetMemoryTypeCount(); i--; )
14313  {
14314  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14315  {
14316  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14317  }
14318 
14319  vma_delete(this, m_pDedicatedAllocations[i]);
14320  vma_delete(this, m_pBlockVectors[i]);
14321  }
14322 }
14323 
14324 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14325 {
14326 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14327  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14328  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14329  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14330  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14331  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14332  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14333  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14334  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14335  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14336  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14337  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14338  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14339  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14340  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14341  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14342  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14343  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14344 #if VMA_DEDICATED_ALLOCATION
14345  if(m_UseKhrDedicatedAllocation)
14346  {
14347  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14348  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14349  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14350  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14351  }
14352 #endif // #if VMA_DEDICATED_ALLOCATION
14353 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14354 
14355 #define VMA_COPY_IF_NOT_NULL(funcName) \
14356  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14357 
14358  if(pVulkanFunctions != VMA_NULL)
14359  {
14360  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14361  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14362  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14363  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14364  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14365  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14366  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14367  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14368  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14369  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14370  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14371  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14372  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14373  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14374  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14375  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14376  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14377 #if VMA_DEDICATED_ALLOCATION
14378  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14379  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14380 #endif
14381  }
14382 
14383 #undef VMA_COPY_IF_NOT_NULL
14384 
14385  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14386  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14387  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14388  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14389  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14390  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14391  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14392  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14393  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14394  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14395  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14396  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14397  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14398  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14399  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14400  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14401  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14402  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14403  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14404 #if VMA_DEDICATED_ALLOCATION
14405  if(m_UseKhrDedicatedAllocation)
14406  {
14407  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14408  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14409  }
14410 #endif
14411 }
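/*
A minimal sketch of satisfying the asserts above when VMA_STATIC_VULKAN_FUNCTIONS
is defined to 0 and the application supplies the entry points itself (here
assumed to be available as statically linked loader symbols;
allocatorCreateInfo is an assumed VmaAllocatorCreateInfo being filled by the
application):

    VmaVulkanFunctions funcs = {};
    funcs.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    funcs.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    funcs.vkAllocateMemory = vkAllocateMemory;
    // ... and so on for every member in the VMA_COPY_IF_NOT_NULL list above ...
    allocatorCreateInfo.pVulkanFunctions = &funcs;
*/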
14412 
14413 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14414 {
14415  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14416  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14417  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14418  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14419 }
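/*
Worked example, assuming the default values of VMA_SMALL_HEAP_MAX_SIZE
(1 GiB) and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 MiB) defined earlier in
this header: a 256 MiB heap counts as small and gets 32 MiB blocks
(heapSize / 8), while an 8 GiB heap gets 256 MiB blocks.
*/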
14420 
14421 VkResult VmaAllocator_T::AllocateMemoryOfType(
14422  VkDeviceSize size,
14423  VkDeviceSize alignment,
14424  bool dedicatedAllocation,
14425  VkBuffer dedicatedBuffer,
14426  VkImage dedicatedImage,
14427  const VmaAllocationCreateInfo& createInfo,
14428  uint32_t memTypeIndex,
14429  VmaSuballocationType suballocType,
14430  size_t allocationCount,
14431  VmaAllocation* pAllocations)
14432 {
14433  VMA_ASSERT(pAllocations != VMA_NULL);
14434  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14435 
14436  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14437 
14438  // If memory type is not HOST_VISIBLE, disable MAPPED.
14439  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14440  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14441  {
14442  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14443  }
14444 
14445  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14446  VMA_ASSERT(blockVector);
14447 
14448  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14449  bool preferDedicatedMemory =
14450  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14451  dedicatedAllocation ||
14452  // Heuristics: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
14453  size > preferredBlockSize / 2;
14454 
14455  if(preferDedicatedMemory &&
14456  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14457  finalCreateInfo.pool == VK_NULL_HANDLE)
14458  {
14459  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14460  }
14461 
14462  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14463  {
14464  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14465  {
14466  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14467  }
14468  else
14469  {
14470  return AllocateDedicatedMemory(
14471  size,
14472  suballocType,
14473  memTypeIndex,
14474  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14475  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14476  finalCreateInfo.pUserData,
14477  dedicatedBuffer,
14478  dedicatedImage,
14479  allocationCount,
14480  pAllocations);
14481  }
14482  }
14483  else
14484  {
14485  VkResult res = blockVector->Allocate(
14486  m_CurrentFrameIndex.load(),
14487  size,
14488  alignment,
14489  finalCreateInfo,
14490  suballocType,
14491  allocationCount,
14492  pAllocations);
14493  if(res == VK_SUCCESS)
14494  {
14495  return res;
14496  }
14497 
14498  // Try dedicated memory as a fallback.
14499  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14500  {
14501  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14502  }
14503  else
14504  {
14505  res = AllocateDedicatedMemory(
14506  size,
14507  suballocType,
14508  memTypeIndex,
14509  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14510  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14511  finalCreateInfo.pUserData,
14512  dedicatedBuffer,
14513  dedicatedImage,
14514  allocationCount,
14515  pAllocations);
14516  if(res == VK_SUCCESS)
14517  {
14518  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
14519  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14520  return VK_SUCCESS;
14521  }
14522  else
14523  {
14524  // Everything failed: Return error code.
14525  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14526  return res;
14527  }
14528  }
14529  }
14530 }
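/*
Summary of the decision flow above: if VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
ends up set (explicitly, or via the heuristics), a dedicated VkDeviceMemory is
allocated directly; otherwise the block vector is tried first and dedicated
memory serves as the fallback, unless VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
forbids allocating new device memory.
*/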
14531 
14532 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14533  VkDeviceSize size,
14534  VmaSuballocationType suballocType,
14535  uint32_t memTypeIndex,
14536  bool map,
14537  bool isUserDataString,
14538  void* pUserData,
14539  VkBuffer dedicatedBuffer,
14540  VkImage dedicatedImage,
14541  size_t allocationCount,
14542  VmaAllocation* pAllocations)
14543 {
14544  VMA_ASSERT(allocationCount > 0 && pAllocations);
14545 
14546  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14547  allocInfo.memoryTypeIndex = memTypeIndex;
14548  allocInfo.allocationSize = size;
14549 
14550 #if VMA_DEDICATED_ALLOCATION
14551  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14552  if(m_UseKhrDedicatedAllocation)
14553  {
14554  if(dedicatedBuffer != VK_NULL_HANDLE)
14555  {
14556  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14557  dedicatedAllocInfo.buffer = dedicatedBuffer;
14558  allocInfo.pNext = &dedicatedAllocInfo;
14559  }
14560  else if(dedicatedImage != VK_NULL_HANDLE)
14561  {
14562  dedicatedAllocInfo.image = dedicatedImage;
14563  allocInfo.pNext = &dedicatedAllocInfo;
14564  }
14565  }
14566 #endif // #if VMA_DEDICATED_ALLOCATION
14567 
14568  size_t allocIndex;
14569  VkResult res = VK_SUCCESS;
14570  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14571  {
14572  res = AllocateDedicatedMemoryPage(
14573  size,
14574  suballocType,
14575  memTypeIndex,
14576  allocInfo,
14577  map,
14578  isUserDataString,
14579  pUserData,
14580  pAllocations + allocIndex);
14581  if(res != VK_SUCCESS)
14582  {
14583  break;
14584  }
14585  }
14586 
14587  if(res == VK_SUCCESS)
14588  {
14589  // Register them in m_pDedicatedAllocations.
14590  {
14591  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14592  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14593  VMA_ASSERT(pDedicatedAllocations);
14594  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14595  {
14596  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14597  }
14598  }
14599 
14600  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14601  }
14602  else
14603  {
14604  // Free all already created allocations.
14605  while(allocIndex--)
14606  {
14607  VmaAllocation currAlloc = pAllocations[allocIndex];
14608  VkDeviceMemory hMemory = currAlloc->GetMemory();
14609 
14610  /*
14611  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14612  before vkFreeMemory.
14613 
14614  if(currAlloc->GetMappedData() != VMA_NULL)
14615  {
14616  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14617  }
14618  */
14619 
14620  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14621 
14622  currAlloc->SetUserData(this, VMA_NULL);
14623  currAlloc->Dtor();
14624  m_AllocationObjectAllocator.Free(currAlloc);
14625  }
14626 
14627  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14628  }
14629 
14630  return res;
14631 }
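/*
Multi-page dedicated allocation is all-or-nothing: if any page in the loop
above fails, every page created so far is freed (no explicit vkUnmapMemory is
needed before vkFreeMemory) and pAllocations is zeroed before the error code
is returned.
*/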
14632 
14633 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14634  VkDeviceSize size,
14635  VmaSuballocationType suballocType,
14636  uint32_t memTypeIndex,
14637  const VkMemoryAllocateInfo& allocInfo,
14638  bool map,
14639  bool isUserDataString,
14640  void* pUserData,
14641  VmaAllocation* pAllocation)
14642 {
14643  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14644  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14645  if(res < 0)
14646  {
14647  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14648  return res;
14649  }
14650 
14651  void* pMappedData = VMA_NULL;
14652  if(map)
14653  {
14654  res = (*m_VulkanFunctions.vkMapMemory)(
14655  m_hDevice,
14656  hMemory,
14657  0,
14658  VK_WHOLE_SIZE,
14659  0,
14660  &pMappedData);
14661  if(res < 0)
14662  {
14663  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14664  FreeVulkanMemory(memTypeIndex, size, hMemory);
14665  return res;
14666  }
14667  }
14668 
14669  *pAllocation = m_AllocationObjectAllocator.Allocate();
14670  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
14671  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14672  (*pAllocation)->SetUserData(this, pUserData);
14673  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14674  {
14675  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14676  }
14677 
14678  return VK_SUCCESS;
14679 }
14680 
14681 void VmaAllocator_T::GetBufferMemoryRequirements(
14682  VkBuffer hBuffer,
14683  VkMemoryRequirements& memReq,
14684  bool& requiresDedicatedAllocation,
14685  bool& prefersDedicatedAllocation) const
14686 {
14687 #if VMA_DEDICATED_ALLOCATION
14688  if(m_UseKhrDedicatedAllocation)
14689  {
14690  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14691  memReqInfo.buffer = hBuffer;
14692 
14693  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14694 
14695  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14696  memReq2.pNext = &memDedicatedReq;
14697 
14698  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14699 
14700  memReq = memReq2.memoryRequirements;
14701  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14702  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14703  }
14704  else
14705 #endif // #if VMA_DEDICATED_ALLOCATION
14706  {
14707  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14708  requiresDedicatedAllocation = false;
14709  prefersDedicatedAllocation = false;
14710  }
14711 }
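/*
Sketch (illustrative only, not part of the library): the equivalent raw-Vulkan
query an application could issue itself when VK_KHR_dedicated_allocation is
enabled. The pNext chain mirrors what GetBufferMemoryRequirements() builds
above; "device" and "myBuffer" are placeholder handles:

    VkBufferMemoryRequirementsInfo2KHR info = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    info.buffer = myBuffer;
    VkMemoryDedicatedRequirementsKHR dedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    memReq2.pNext = &dedicatedReq;
    vkGetBufferMemoryRequirements2KHR(device, &info, &memReq2);
    // dedicatedReq.requiresDedicatedAllocation / prefersDedicatedAllocation are now valid.
*/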
14712 
14713 void VmaAllocator_T::GetImageMemoryRequirements(
14714  VkImage hImage,
14715  VkMemoryRequirements& memReq,
14716  bool& requiresDedicatedAllocation,
14717  bool& prefersDedicatedAllocation) const
14718 {
14719 #if VMA_DEDICATED_ALLOCATION
14720  if(m_UseKhrDedicatedAllocation)
14721  {
14722  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14723  memReqInfo.image = hImage;
14724 
14725  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14726 
14727  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14728  memReq2.pNext = &memDedicatedReq;
14729 
14730  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14731 
14732  memReq = memReq2.memoryRequirements;
14733  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14734  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14735  }
14736  else
14737 #endif // #if VMA_DEDICATED_ALLOCATION
14738  {
14739  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14740  requiresDedicatedAllocation = false;
14741  prefersDedicatedAllocation = false;
14742  }
14743 }
14744 
14745 VkResult VmaAllocator_T::AllocateMemory(
14746  const VkMemoryRequirements& vkMemReq,
14747  bool requiresDedicatedAllocation,
14748  bool prefersDedicatedAllocation,
14749  VkBuffer dedicatedBuffer,
14750  VkImage dedicatedImage,
14751  const VmaAllocationCreateInfo& createInfo,
14752  VmaSuballocationType suballocType,
14753  size_t allocationCount,
14754  VmaAllocation* pAllocations)
14755 {
14756  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14757 
14758  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14759 
14760  if(vkMemReq.size == 0)
14761  {
14762  return VK_ERROR_VALIDATION_FAILED_EXT;
14763  }
14764  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14765  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14766  {
14767  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14768  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14769  }
14770  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14771  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14772  {
14773  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14774  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14775  }
14776  if(requiresDedicatedAllocation)
14777  {
14778  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14779  {
14780  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14781  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14782  }
14783  if(createInfo.pool != VK_NULL_HANDLE)
14784  {
14785  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14786  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14787  }
14788  }
14789  if((createInfo.pool != VK_NULL_HANDLE) &&
14790  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14791  {
14792  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14793  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14794  }
14795 
14796  if(createInfo.pool != VK_NULL_HANDLE)
14797  {
14798  const VkDeviceSize alignmentForPool = VMA_MAX(
14799  vkMemReq.alignment,
14800  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14801  return createInfo.pool->m_BlockVector.Allocate(
14802  m_CurrentFrameIndex.load(),
14803  vkMemReq.size,
14804  alignmentForPool,
14805  createInfo,
14806  suballocType,
14807  allocationCount,
14808  pAllocations);
14809  }
14810  else
14811  {
14812  // Bit mask of Vulkan memory types acceptable for this allocation.
14813  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14814  uint32_t memTypeIndex = UINT32_MAX;
14815  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14816  if(res == VK_SUCCESS)
14817  {
14818  VkDeviceSize alignmentForMemType = VMA_MAX(
14819  vkMemReq.alignment,
14820  GetMemoryTypeMinAlignment(memTypeIndex));
14821 
14822  res = AllocateMemoryOfType(
14823  vkMemReq.size,
14824  alignmentForMemType,
14825  requiresDedicatedAllocation || prefersDedicatedAllocation,
14826  dedicatedBuffer,
14827  dedicatedImage,
14828  createInfo,
14829  memTypeIndex,
14830  suballocType,
14831  allocationCount,
14832  pAllocations);
14833  // Succeeded on first try.
14834  if(res == VK_SUCCESS)
14835  {
14836  return res;
14837  }
14838  // Allocation from this memory type failed. Try other compatible memory types.
14839  else
14840  {
14841  for(;;)
14842  {
14843  // Remove old memTypeIndex from list of possibilities.
14844  memoryTypeBits &= ~(1u << memTypeIndex);
14845  // Find alternative memTypeIndex.
14846  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14847  if(res == VK_SUCCESS)
14848  {
14849  alignmentForMemType = VMA_MAX(
14850  vkMemReq.alignment,
14851  GetMemoryTypeMinAlignment(memTypeIndex));
14852 
14853  res = AllocateMemoryOfType(
14854  vkMemReq.size,
14855  alignmentForMemType,
14856  requiresDedicatedAllocation || prefersDedicatedAllocation,
14857  dedicatedBuffer,
14858  dedicatedImage,
14859  createInfo,
14860  memTypeIndex,
14861  suballocType,
14862  allocationCount,
14863  pAllocations);
14864  // Allocation from this alternative memory type succeeded.
14865  if(res == VK_SUCCESS)
14866  {
14867  return res;
14868  }
14869  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14870  }
14871  // No other matching memory type index could be found.
14872  else
14873  {
14874  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14875  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14876  }
14877  }
14878  }
14879  }
14880  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14881  else
14882  return res;
14883  }
14884 }
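/*
Sketch (illustrative only) of the fallback idiom used above: each failed
memory type is masked out of the candidate bits and the search repeats.
FindType/TryAllocate are hypothetical stand-ins for the calls above:

    uint32_t candidates = vkMemReq.memoryTypeBits;
    uint32_t typeIndex;
    while(FindType(candidates, &typeIndex) == VK_SUCCESS)
    {
        if(TryAllocate(typeIndex) == VK_SUCCESS)
            break;
        candidates &= ~(1u << typeIndex); // exclude the failed type and retry
    }
*/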
14885 
14886 void VmaAllocator_T::FreeMemory(
14887  size_t allocationCount,
14888  const VmaAllocation* pAllocations)
14889 {
14890  VMA_ASSERT(pAllocations);
14891 
14892  for(size_t allocIndex = allocationCount; allocIndex--; )
14893  {
14894  VmaAllocation allocation = pAllocations[allocIndex];
14895 
14896  if(allocation != VK_NULL_HANDLE)
14897  {
14898  if(TouchAllocation(allocation))
14899  {
14900  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14901  {
14902  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14903  }
14904 
14905  switch(allocation->GetType())
14906  {
14907  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14908  {
14909  VmaBlockVector* pBlockVector = VMA_NULL;
14910  VmaPool hPool = allocation->GetBlock()->GetParentPool();
14911  if(hPool != VK_NULL_HANDLE)
14912  {
14913  pBlockVector = &hPool->m_BlockVector;
14914  }
14915  else
14916  {
14917  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14918  pBlockVector = m_pBlockVectors[memTypeIndex];
14919  }
14920  pBlockVector->Free(allocation);
14921  }
14922  break;
14923  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14924  FreeDedicatedMemory(allocation);
14925  break;
14926  default:
14927  VMA_ASSERT(0);
14928  }
14929  }
14930 
14931  allocation->SetUserData(this, VMA_NULL);
14932  allocation->Dtor();
14933  m_AllocationObjectAllocator.Free(allocation);
14934  }
14935  }
14936 }
14937 
14938 VkResult VmaAllocator_T::ResizeAllocation(
14939  const VmaAllocation alloc,
14940  VkDeviceSize newSize)
14941 {
14942  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14943  {
14944  return VK_ERROR_VALIDATION_FAILED_EXT;
14945  }
14946  if(newSize == alloc->GetSize())
14947  {
14948  return VK_SUCCESS;
14949  }
14950 
14951  switch(alloc->GetType())
14952  {
14953  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14954  return VK_ERROR_FEATURE_NOT_PRESENT;
14955  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14956  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14957  {
14958  alloc->ChangeSize(newSize);
14959  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14960  return VK_SUCCESS;
14961  }
14962  else
14963  {
14964  return VK_ERROR_OUT_OF_POOL_MEMORY;
14965  }
14966  default:
14967  VMA_ASSERT(0);
14968  return VK_ERROR_VALIDATION_FAILED_EXT;
14969  }
14970 }
14971 
14972 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14973 {
14974  // Initialize.
14975  InitStatInfo(pStats->total);
14976  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14977  InitStatInfo(pStats->memoryType[i]);
14978  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14979  InitStatInfo(pStats->memoryHeap[i]);
14980 
14981  // Process default pools.
14982  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14983  {
14984  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14985  VMA_ASSERT(pBlockVector);
14986  pBlockVector->AddStats(pStats);
14987  }
14988 
14989  // Process custom pools.
14990  {
14991  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14992  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14993  {
14994  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14995  }
14996  }
14997 
14998  // Process dedicated allocations.
14999  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15000  {
15001  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15002  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15003  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15004  VMA_ASSERT(pDedicatedAllocVector);
15005  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
15006  {
15007  VmaStatInfo allocationStatInfo;
15008  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
15009  VmaAddStatInfo(pStats->total, allocationStatInfo);
15010  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
15011  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
15012  }
15013  }
15014 
15015  // Postprocess.
15016  VmaPostprocessCalcStatInfo(pStats->total);
15017  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15018  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15019  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15020  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15021 }
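/*
Usage sketch (illustrative only) of the aggregation above via the public API:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.total.usedBytes and stats.total.unusedBytes now summarize
    // default pools, custom pools, and dedicated allocations combined.
*/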
15022 
15023 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
15024 
15025 VkResult VmaAllocator_T::DefragmentationBegin(
15026  const VmaDefragmentationInfo2& info,
15027  VmaDefragmentationStats* pStats,
15028  VmaDefragmentationContext* pContext)
15029 {
15030  if(info.pAllocationsChanged != VMA_NULL)
15031  {
15032  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15033  }
15034 
15035  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15036  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15037 
15038  (*pContext)->AddPools(info.poolCount, info.pPools);
15039  (*pContext)->AddAllocations(
15040  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
15041 
15042  VkResult res = (*pContext)->Defragment(
15043  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
15044  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
15045  info.commandBuffer, pStats);
15046 
15047  if(res != VK_NOT_READY)
15048  {
15049  vma_delete(this, *pContext);
15050  *pContext = VMA_NULL;
15051  }
15052 
15053  return res;
15054 }
15055 
15056 VkResult VmaAllocator_T::DefragmentationEnd(
15057  VmaDefragmentationContext context)
15058 {
15059  vma_delete(this, context);
15060  return VK_SUCCESS;
15061 }
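/*
Usage sketch (illustrative only) of the begin/end pair above. pAllocations
should hold buffer-bound allocations (see the Defragmentation chapter of the
documentation); "allocs"/"allocCount" are placeholders for the application's
allocation list:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount;
    defragInfo.pAllocations = allocs;
    VmaDefragmentationContext defragCtx;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    // ... recreate and rebind buffers as described in the documentation ...
    vmaDefragmentationEnd(allocator, defragCtx);
*/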
15062 
15063 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15064 {
15065  if(hAllocation->CanBecomeLost())
15066  {
15067  /*
15068  Warning: This is a carefully designed algorithm.
15069  Do not modify unless you really know what you're doing :)
15070  */
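// The for(;;) below is an atomic compare-and-swap retry loop: it keeps
// attempting to advance the allocation's last-use frame index to the current
// frame until it observes either success or the "lost" state.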
15071  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15072  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15073  for(;;)
15074  {
15075  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15076  {
15077  pAllocationInfo->memoryType = UINT32_MAX;
15078  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15079  pAllocationInfo->offset = 0;
15080  pAllocationInfo->size = hAllocation->GetSize();
15081  pAllocationInfo->pMappedData = VMA_NULL;
15082  pAllocationInfo->pUserData = hAllocation->GetUserData();
15083  return;
15084  }
15085  else if(localLastUseFrameIndex == localCurrFrameIndex)
15086  {
15087  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15088  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15089  pAllocationInfo->offset = hAllocation->GetOffset();
15090  pAllocationInfo->size = hAllocation->GetSize();
15091  pAllocationInfo->pMappedData = VMA_NULL;
15092  pAllocationInfo->pUserData = hAllocation->GetUserData();
15093  return;
15094  }
15095  else // Last use time earlier than current time.
15096  {
15097  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15098  {
15099  localLastUseFrameIndex = localCurrFrameIndex;
15100  }
15101  }
15102  }
15103  }
15104  else
15105  {
15106 #if VMA_STATS_STRING_ENABLED
15107  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15108  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15109  for(;;)
15110  {
15111  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15112  if(localLastUseFrameIndex == localCurrFrameIndex)
15113  {
15114  break;
15115  }
15116  else // Last use time earlier than current time.
15117  {
15118  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15119  {
15120  localLastUseFrameIndex = localCurrFrameIndex;
15121  }
15122  }
15123  }
15124 #endif
15125 
15126  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15127  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15128  pAllocationInfo->offset = hAllocation->GetOffset();
15129  pAllocationInfo->size = hAllocation->GetSize();
15130  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15131  pAllocationInfo->pUserData = hAllocation->GetUserData();
15132  }
15133 }
15134 
15135 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15136 {
15137  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15138  if(hAllocation->CanBecomeLost())
15139  {
15140  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15141  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15142  for(;;)
15143  {
15144  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15145  {
15146  return false;
15147  }
15148  else if(localLastUseFrameIndex == localCurrFrameIndex)
15149  {
15150  return true;
15151  }
15152  else // Last use time earlier than current time.
15153  {
15154  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15155  {
15156  localLastUseFrameIndex = localCurrFrameIndex;
15157  }
15158  }
15159  }
15160  }
15161  else
15162  {
15163 #if VMA_STATS_STRING_ENABLED
15164  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15165  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15166  for(;;)
15167  {
15168  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15169  if(localLastUseFrameIndex == localCurrFrameIndex)
15170  {
15171  break;
15172  }
15173  else // Last use time earlier than current time.
15174  {
15175  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15176  {
15177  localLastUseFrameIndex = localCurrFrameIndex;
15178  }
15179  }
15180  }
15181 #endif
15182 
15183  return true;
15184  }
15185 }
15186 
15187 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15188 {
15189  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15190 
15191  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15192 
15193  if(newCreateInfo.maxBlockCount == 0)
15194  {
15195  newCreateInfo.maxBlockCount = SIZE_MAX;
15196  }
15197  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15198  {
15199  return VK_ERROR_INITIALIZATION_FAILED;
15200  }
15201 
15202  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15203 
15204  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15205 
15206  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15207  if(res != VK_SUCCESS)
15208  {
15209  vma_delete(this, *pPool);
15210  *pPool = VMA_NULL;
15211  return res;
15212  }
15213 
15214  // Add to m_Pools.
15215  {
15216  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15217  (*pPool)->SetId(m_NextPoolId++);
15218  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15219  }
15220 
15221  return VK_SUCCESS;
15222 }
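/*
Usage sketch (illustrative only) of pool creation via the public API;
memoryTypeIndex would normally come from vmaFindMemoryTypeIndex:

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 64ull * 1024 * 1024; // optional: fixed 64 MiB blocks
    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
*/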
15223 
15224 void VmaAllocator_T::DestroyPool(VmaPool pool)
15225 {
15226  // Remove from m_Pools.
15227  {
15228  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15229  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15230  VMA_ASSERT(success && "Pool not found in Allocator.");
15231  }
15232 
15233  vma_delete(this, pool);
15234 }
15235 
15236 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15237 {
15238  pool->m_BlockVector.GetPoolStats(pPoolStats);
15239 }
15240 
15241 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15242 {
15243  m_CurrentFrameIndex.store(frameIndex);
15244 }
15245 
15246 void VmaAllocator_T::MakePoolAllocationsLost(
15247  VmaPool hPool,
15248  size_t* pLostAllocationCount)
15249 {
15250  hPool->m_BlockVector.MakePoolAllocationsLost(
15251  m_CurrentFrameIndex.load(),
15252  pLostAllocationCount);
15253 }
15254 
15255 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15256 {
15257  return hPool->m_BlockVector.CheckCorruption();
15258 }
15259 
15260 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15261 {
15262  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15263 
15264  // Process default pools.
15265  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15266  {
15267  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15268  {
15269  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15270  VMA_ASSERT(pBlockVector);
15271  VkResult localRes = pBlockVector->CheckCorruption();
15272  switch(localRes)
15273  {
15274  case VK_ERROR_FEATURE_NOT_PRESENT:
15275  break;
15276  case VK_SUCCESS:
15277  finalRes = VK_SUCCESS;
15278  break;
15279  default:
15280  return localRes;
15281  }
15282  }
15283  }
15284 
15285  // Process custom pools.
15286  {
15287  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15288  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15289  {
15290  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15291  {
15292  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15293  switch(localRes)
15294  {
15295  case VK_ERROR_FEATURE_NOT_PRESENT:
15296  break;
15297  case VK_SUCCESS:
15298  finalRes = VK_SUCCESS;
15299  break;
15300  default:
15301  return localRes;
15302  }
15303  }
15304  }
15305  }
15306 
15307  return finalRes;
15308 }
15309 
15310 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15311 {
15312  *pAllocation = m_AllocationObjectAllocator.Allocate();
15313  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15314  (*pAllocation)->InitLost();
15315 }
15316 
15317 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15318 {
15319  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15320 
15321  VkResult res;
15322  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15323  {
15324  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15325  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15326  {
15327  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15328  if(res == VK_SUCCESS)
15329  {
15330  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15331  }
15332  }
15333  else
15334  {
15335  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15336  }
15337  }
15338  else
15339  {
15340  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15341  }
15342 
15343  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15344  {
15345  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15346  }
15347 
15348  return res;
15349 }
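/*
The heap size limit honored above is configured at allocator creation time.
A sketch (illustrative only) capping heap 0; VK_WHOLE_SIZE means "no limit"
and "physicalDevice"/"device" are placeholder handles:

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE;
    heapLimits[0] = 256ull * 1024 * 1024; // cap heap 0 at 256 MiB
    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapLimits;
    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/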
15350 
15351 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15352 {
15353  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15354  {
15355  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15356  }
15357 
15358  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15359 
15360  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15361  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15362  {
15363  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15364  m_HeapSizeLimit[heapIndex] += size;
15365  }
15366 }
15367 
15368 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15369 {
15370  if(hAllocation->CanBecomeLost())
15371  {
15372  return VK_ERROR_MEMORY_MAP_FAILED;
15373  }
15374 
15375  switch(hAllocation->GetType())
15376  {
15377  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15378  {
15379  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15380  char *pBytes = VMA_NULL;
15381  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15382  if(res == VK_SUCCESS)
15383  {
15384  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15385  hAllocation->BlockAllocMap();
15386  }
15387  return res;
15388  }
15389  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15390  return hAllocation->DedicatedAllocMap(this, ppData);
15391  default:
15392  VMA_ASSERT(0);
15393  return VK_ERROR_MEMORY_MAP_FAILED;
15394  }
15395 }
15396 
15397 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15398 {
15399  switch(hAllocation->GetType())
15400  {
15401  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15402  {
15403  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15404  hAllocation->BlockAllocUnmap();
15405  pBlock->Unmap(this, 1);
15406  }
15407  break;
15408  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15409  hAllocation->DedicatedAllocUnmap(this);
15410  break;
15411  default:
15412  VMA_ASSERT(0);
15413  }
15414 }
15415 
15416 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15417 {
15418  VkResult res = VK_SUCCESS;
15419  switch(hAllocation->GetType())
15420  {
15421  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15422  res = GetVulkanFunctions().vkBindBufferMemory(
15423  m_hDevice,
15424  hBuffer,
15425  hAllocation->GetMemory(),
15426  0); //memoryOffset
15427  break;
15428  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15429  {
15430  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15431  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15432  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15433  break;
15434  }
15435  default:
15436  VMA_ASSERT(0);
15437  }
15438  return res;
15439 }
15440 
15441 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15442 {
15443  VkResult res = VK_SUCCESS;
15444  switch(hAllocation->GetType())
15445  {
15446  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15447  res = GetVulkanFunctions().vkBindImageMemory(
15448  m_hDevice,
15449  hImage,
15450  hAllocation->GetMemory(),
15451  0); //memoryOffset
15452  break;
15453  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15454  {
15455  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15456  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15457  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15458  break;
15459  }
15460  default:
15461  VMA_ASSERT(0);
15462  }
15463  return res;
15464 }
15465 
15466 void VmaAllocator_T::FlushOrInvalidateAllocation(
15467  VmaAllocation hAllocation,
15468  VkDeviceSize offset, VkDeviceSize size,
15469  VMA_CACHE_OPERATION op)
15470 {
15471  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15472  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15473  {
15474  const VkDeviceSize allocationSize = hAllocation->GetSize();
15475  VMA_ASSERT(offset <= allocationSize);
15476 
15477  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15478 
15479  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15480  memRange.memory = hAllocation->GetMemory();
15481 
15482  switch(hAllocation->GetType())
15483  {
15484  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15485  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15486  if(size == VK_WHOLE_SIZE)
15487  {
15488  memRange.size = allocationSize - memRange.offset;
15489  }
15490  else
15491  {
15492  VMA_ASSERT(offset + size <= allocationSize);
15493  memRange.size = VMA_MIN(
15494  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15495  allocationSize - memRange.offset);
15496  }
15497  break;
15498 
15499  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15500  {
15501  // 1. Still within this allocation.
15502  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15503  if(size == VK_WHOLE_SIZE)
15504  {
15505  size = allocationSize - offset;
15506  }
15507  else
15508  {
15509  VMA_ASSERT(offset + size <= allocationSize);
15510  }
15511  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15512 
15513  // 2. Adjust to whole block.
15514  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15515  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15516  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15517  memRange.offset += allocationOffset;
15518  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15519 
15520  break;
15521  }
15522 
15523  default:
15524  VMA_ASSERT(0);
15525  }
15526 
15527  switch(op)
15528  {
15529  case VMA_CACHE_FLUSH:
15530  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15531  break;
15532  case VMA_CACHE_INVALIDATE:
15533  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15534  break;
15535  default:
15536  VMA_ASSERT(0);
15537  }
15538  }
15539  // else: Just ignore this call.
15540 }
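/*
Worked example (illustrative only) of the nonCoherentAtomSize alignment in the
dedicated-allocation branch above, assuming offset = 70, size = 20,
nonCoherentAtomSize = 64:

    memRange.offset = VmaAlignDown(70, 64) = 64
    memRange.size   = VmaAlignUp(20 + (70 - 64), 64) = VmaAlignUp(26, 64) = 64
*/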
15541 
15542 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15543 {
15544  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15545 
15546  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15547  {
15548  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15549  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15550  VMA_ASSERT(pDedicatedAllocations);
15551  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15552  VMA_ASSERT(success);
15553  }
15554 
15555  VkDeviceMemory hMemory = allocation->GetMemory();
15556 
15557  /*
15558  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15559  before vkFreeMemory.
15560 
15561  if(allocation->GetMappedData() != VMA_NULL)
15562  {
15563  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15564  }
15565  */
15566 
15567  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15568 
15569  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15570 }
15571 
15572 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
15573 {
15574  VkBufferCreateInfo dummyBufCreateInfo;
15575  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
15576 
15577  uint32_t memoryTypeBits = 0;
15578 
15579  // Create buffer.
15580  VkBuffer buf = VMA_NULL;
15581  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
15582  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
15583  if(res == VK_SUCCESS)
15584  {
15585  // Query for supported memory types.
15586  VkMemoryRequirements memReq;
15587  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
15588  memoryTypeBits = memReq.memoryTypeBits;
15589 
15590  // Destroy buffer.
15591  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
15592  }
15593 
15594  return memoryTypeBits;
15595 }
15596 
15597 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15598 {
15599  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15600  !hAllocation->CanBecomeLost() &&
15601  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15602  {
15603  void* pData = VMA_NULL;
15604  VkResult res = Map(hAllocation, &pData);
15605  if(res == VK_SUCCESS)
15606  {
15607  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15608  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15609  Unmap(hAllocation);
15610  }
15611  else
15612  {
15613  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15614  }
15615  }
15616 }
15617 
15618 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
15619 {
15620  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
15621  if(memoryTypeBits == UINT32_MAX)
15622  {
15623  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
15624  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
15625  }
15626  return memoryTypeBits;
15627 }
15628 
15629 #if VMA_STATS_STRING_ENABLED
15630 
15631 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15632 {
15633  bool dedicatedAllocationsStarted = false;
15634  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15635  {
15636  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15637  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15638  VMA_ASSERT(pDedicatedAllocVector);
15639  if(pDedicatedAllocVector->empty() == false)
15640  {
15641  if(dedicatedAllocationsStarted == false)
15642  {
15643  dedicatedAllocationsStarted = true;
15644  json.WriteString("DedicatedAllocations");
15645  json.BeginObject();
15646  }
15647 
15648  json.BeginString("Type ");
15649  json.ContinueString(memTypeIndex);
15650  json.EndString();
15651 
15652  json.BeginArray();
15653 
15654  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15655  {
15656  json.BeginObject(true);
15657  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15658  hAlloc->PrintParameters(json);
15659  json.EndObject();
15660  }
15661 
15662  json.EndArray();
15663  }
15664  }
15665  if(dedicatedAllocationsStarted)
15666  {
15667  json.EndObject();
15668  }
15669 
15670  {
15671  bool allocationsStarted = false;
15672  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15673  {
15674  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15675  {
15676  if(allocationsStarted == false)
15677  {
15678  allocationsStarted = true;
15679  json.WriteString("DefaultPools");
15680  json.BeginObject();
15681  }
15682 
15683  json.BeginString("Type ");
15684  json.ContinueString(memTypeIndex);
15685  json.EndString();
15686 
15687  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15688  }
15689  }
15690  if(allocationsStarted)
15691  {
15692  json.EndObject();
15693  }
15694  }
15695 
15696  // Custom pools
15697  {
15698  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15699  const size_t poolCount = m_Pools.size();
15700  if(poolCount > 0)
15701  {
15702  json.WriteString("Pools");
15703  json.BeginObject();
15704  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15705  {
15706  json.BeginString();
15707  json.ContinueString(m_Pools[poolIndex]->GetId());
15708  json.EndString();
15709 
15710  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15711  }
15712  json.EndObject();
15713  }
15714  }
15715 }
15716 
15717 #endif // #if VMA_STATS_STRING_ENABLED
15718 
15719 ////////////////////////////////////////////////////////////////////////////////
15720 // Public interface
15721 
15722 VkResult vmaCreateAllocator(
15723  const VmaAllocatorCreateInfo* pCreateInfo,
15724  VmaAllocator* pAllocator)
15725 {
15726  VMA_ASSERT(pCreateInfo && pAllocator);
15727  VMA_DEBUG_LOG("vmaCreateAllocator");
15728  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15729  return (*pAllocator)->Init(pCreateInfo);
15730 }
15731 
15732 void vmaDestroyAllocator(
15733  VmaAllocator allocator)
15734 {
15735  if(allocator != VK_NULL_HANDLE)
15736  {
15737  VMA_DEBUG_LOG("vmaDestroyAllocator");
15738  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15739  vma_delete(&allocationCallbacks, allocator);
15740  }
15741 }
15742 
15743 void vmaGetPhysicalDeviceProperties(
15744  VmaAllocator allocator,
15745  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15746 {
15747  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15748  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15749 }
15750 
15751 void vmaGetMemoryProperties(
15752  VmaAllocator allocator,
15753  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15754 {
15755  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15756  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15757 }
15758 
15759 void vmaGetMemoryTypeProperties(
15760  VmaAllocator allocator,
15761  uint32_t memoryTypeIndex,
15762  VkMemoryPropertyFlags* pFlags)
15763 {
15764  VMA_ASSERT(allocator && pFlags);
15765  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15766  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15767 }
15768 
15769 void vmaSetCurrentFrameIndex(
15770  VmaAllocator allocator,
15771  uint32_t frameIndex)
15772 {
15773  VMA_ASSERT(allocator);
15774  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15775 
15776  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15777 
15778  allocator->SetCurrentFrameIndex(frameIndex);
15779 }
15780 
15781 void vmaCalculateStats(
15782  VmaAllocator allocator,
15783  VmaStats* pStats)
15784 {
15785  VMA_ASSERT(allocator && pStats);
15786  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15787  allocator->CalculateStats(pStats);
15788 }
15789 
15790 #if VMA_STATS_STRING_ENABLED
15791 
15792 void vmaBuildStatsString(
15793  VmaAllocator allocator,
15794  char** ppStatsString,
15795  VkBool32 detailedMap)
15796 {
15797  VMA_ASSERT(allocator && ppStatsString);
15798  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15799 
15800  VmaStringBuilder sb(allocator);
15801  {
15802  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15803  json.BeginObject();
15804 
15805  VmaStats stats;
15806  allocator->CalculateStats(&stats);
15807 
15808  json.WriteString("Total");
15809  VmaPrintStatInfo(json, stats.total);
15810 
15811  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15812  {
15813  json.BeginString("Heap ");
15814  json.ContinueString(heapIndex);
15815  json.EndString();
15816  json.BeginObject();
15817 
15818  json.WriteString("Size");
15819  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15820 
15821  json.WriteString("Flags");
15822  json.BeginArray(true);
15823  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15824  {
15825  json.WriteString("DEVICE_LOCAL");
15826  }
15827  json.EndArray();
15828 
15829  if(stats.memoryHeap[heapIndex].blockCount > 0)
15830  {
15831  json.WriteString("Stats");
15832  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15833  }
15834 
15835  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15836  {
15837  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15838  {
15839  json.BeginString("Type ");
15840  json.ContinueString(typeIndex);
15841  json.EndString();
15842 
15843  json.BeginObject();
15844 
15845  json.WriteString("Flags");
15846  json.BeginArray(true);
15847  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15848  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15849  {
15850  json.WriteString("DEVICE_LOCAL");
15851  }
15852  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15853  {
15854  json.WriteString("HOST_VISIBLE");
15855  }
15856  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15857  {
15858  json.WriteString("HOST_COHERENT");
15859  }
15860  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15861  {
15862  json.WriteString("HOST_CACHED");
15863  }
15864  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15865  {
15866  json.WriteString("LAZILY_ALLOCATED");
15867  }
15868  json.EndArray();
15869 
15870  if(stats.memoryType[typeIndex].blockCount > 0)
15871  {
15872  json.WriteString("Stats");
15873  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15874  }
15875 
15876  json.EndObject();
15877  }
15878  }
15879 
15880  json.EndObject();
15881  }
15882  if(detailedMap == VK_TRUE)
15883  {
15884  allocator->PrintDetailedMap(json);
15885  }
15886 
15887  json.EndObject();
15888  }
15889 
15890  const size_t len = sb.GetLength();
15891  char* const pChars = vma_new_array(allocator, char, len + 1);
15892  if(len > 0)
15893  {
15894  memcpy(pChars, sb.GetData(), len);
15895  }
15896  pChars[len] = '\0';
15897  *ppStatsString = pChars;
15898 }
15899 
15900 void vmaFreeStatsString(
15901  VmaAllocator allocator,
15902  char* pStatsString)
15903 {
15904  if(pStatsString != VMA_NULL)
15905  {
15906  VMA_ASSERT(allocator);
15907  size_t len = strlen(pStatsString);
15908  vma_delete_array(allocator, pStatsString, len + 1);
15909  }
15910 }
15911 
15912 #endif // #if VMA_STATS_STRING_ENABLED
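/*
Usage sketch (illustrative only) of the stats-string pair above:

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    // ... write statsString to a log or file ...
    vmaFreeStatsString(allocator, statsString);
*/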
15913 
15914 /*
15915 This function is not protected by any mutex because it just reads immutable data.
15916 */
15917 VkResult vmaFindMemoryTypeIndex(
15918  VmaAllocator allocator,
15919  uint32_t memoryTypeBits,
15920  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15921  uint32_t* pMemoryTypeIndex)
15922 {
15923  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15924  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15925  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15926 
15927  if(pAllocationCreateInfo->memoryTypeBits != 0)
15928  {
15929  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15930  }
15931 
15932  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15933  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15934 
15935  // Convert usage to requiredFlags and preferredFlags.
15936  switch(pAllocationCreateInfo->usage)
15937  {
15938  case VMA_MEMORY_USAGE_UNKNOWN:
15939  break;
15940  case VMA_MEMORY_USAGE_GPU_ONLY:
15941  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15942  {
15943  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15944  }
15945  break;
15946  case VMA_MEMORY_USAGE_CPU_ONLY:
15947  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15948  break;
15949  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15950  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15951  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15952  {
15953  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15954  }
15955  break;
15956  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15957  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15958  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15959  break;
15960  default:
15961  break;
15962  }
15963 
15964  *pMemoryTypeIndex = UINT32_MAX;
15965  uint32_t minCost = UINT32_MAX;
15966  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15967  memTypeIndex < allocator->GetMemoryTypeCount();
15968  ++memTypeIndex, memTypeBit <<= 1)
15969  {
15970  // This memory type is acceptable according to memoryTypeBits bitmask.
15971  if((memTypeBit & memoryTypeBits) != 0)
15972  {
15973  const VkMemoryPropertyFlags currFlags =
15974  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15975  // This memory type contains requiredFlags.
15976  if((requiredFlags & ~currFlags) == 0)
15977  {
15978  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15979  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15980  // Remember memory type with lowest cost.
15981  if(currCost < minCost)
15982  {
15983  *pMemoryTypeIndex = memTypeIndex;
15984  if(currCost == 0)
15985  {
15986  return VK_SUCCESS;
15987  }
15988  minCost = currCost;
15989  }
15990  }
15991  }
15992  }
15993  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15994 }
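/*
Usage sketch (illustrative only) of the cost-based search above. The cost of a
type is the number of preferredFlags bits it lacks; the first zero-cost type
wins immediately:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    uint32_t memTypeIndex;
    // memoryTypeBits would normally come from vkGetBufferMemoryRequirements.
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/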
15995 
15996 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15997  VmaAllocator allocator,
15998  const VkBufferCreateInfo* pBufferCreateInfo,
15999  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16000  uint32_t* pMemoryTypeIndex)
16001 {
16002  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16003  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
16004  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16005  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16006 
16007  const VkDevice hDev = allocator->m_hDevice;
16008  VkBuffer hBuffer = VK_NULL_HANDLE;
16009  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
16010  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
16011  if(res == VK_SUCCESS)
16012  {
16013  VkMemoryRequirements memReq = {};
16014  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
16015  hDev, hBuffer, &memReq);
16016 
16017  res = vmaFindMemoryTypeIndex(
16018  allocator,
16019  memReq.memoryTypeBits,
16020  pAllocationCreateInfo,
16021  pMemoryTypeIndex);
16022 
16023  allocator->GetVulkanFunctions().vkDestroyBuffer(
16024  hDev, hBuffer, allocator->GetAllocationCallbacks());
16025  }
16026  return res;
16027 }
16028 
16029 VkResult vmaFindMemoryTypeIndexForImageInfo(
16030  VmaAllocator allocator,
16031  const VkImageCreateInfo* pImageCreateInfo,
16032  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16033  uint32_t* pMemoryTypeIndex)
16034 {
16035  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16036  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
16037  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16038  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16039 
16040  const VkDevice hDev = allocator->m_hDevice;
16041  VkImage hImage = VK_NULL_HANDLE;
16042  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
16043  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
16044  if(res == VK_SUCCESS)
16045  {
16046  VkMemoryRequirements memReq = {};
16047  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16048  hDev, hImage, &memReq);
16049 
16050  res = vmaFindMemoryTypeIndex(
16051  allocator,
16052  memReq.memoryTypeBits,
16053  pAllocationCreateInfo,
16054  pMemoryTypeIndex);
16055 
16056  allocator->GetVulkanFunctions().vkDestroyImage(
16057  hDev, hImage, allocator->GetAllocationCallbacks());
16058  }
16059  return res;
16060 }
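/*
Both helpers above create a temporary buffer/image solely to query its memory
requirements, then destroy it. Usage sketch (illustrative only):

    VkImageCreateInfo imgInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    // ... fill imgInfo exactly as the real image will be created ...
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    uint32_t memTypeIndex;
    vmaFindMemoryTypeIndexForImageInfo(allocator, &imgInfo, &allocCreateInfo, &memTypeIndex);
*/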
16061 
16062 VkResult vmaCreatePool(
16063  VmaAllocator allocator,
16064  const VmaPoolCreateInfo* pCreateInfo,
16065  VmaPool* pPool)
16066 {
16067  VMA_ASSERT(allocator && pCreateInfo && pPool);
16068 
16069  VMA_DEBUG_LOG("vmaCreatePool");
16070 
16071  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16072 
16073  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16074 
16075 #if VMA_RECORDING_ENABLED
16076  if(allocator->GetRecorder() != VMA_NULL)
16077  {
16078  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16079  }
16080 #endif
16081 
16082  return res;
16083 }
16084 
16085 void vmaDestroyPool(
16086  VmaAllocator allocator,
16087  VmaPool pool)
16088 {
16089  VMA_ASSERT(allocator);
16090 
16091  if(pool == VK_NULL_HANDLE)
16092  {
16093  return;
16094  }
16095 
16096  VMA_DEBUG_LOG("vmaDestroyPool");
16097 
16098  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16099 
16100 #if VMA_RECORDING_ENABLED
16101  if(allocator->GetRecorder() != VMA_NULL)
16102  {
16103  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16104  }
16105 #endif
16106 
16107  allocator->DestroyPool(pool);
16108 }
16109 
16110 void vmaGetPoolStats(
16111  VmaAllocator allocator,
16112  VmaPool pool,
16113  VmaPoolStats* pPoolStats)
16114 {
16115  VMA_ASSERT(allocator && pool && pPoolStats);
16116 
16117  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16118 
16119  allocator->GetPoolStats(pool, pPoolStats);
16120 }
16121 
16122 void vmaMakePoolAllocationsLost(
16123  VmaAllocator allocator,
16124  VmaPool pool,
16125  size_t* pLostAllocationCount)
16126 {
16127  VMA_ASSERT(allocator && pool);
16128 
16129  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16130 
16131 #if VMA_RECORDING_ENABLED
16132  if(allocator->GetRecorder() != VMA_NULL)
16133  {
16134  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16135  }
16136 #endif
16137 
16138  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16139 }
16140 
16141 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16142 {
16143  VMA_ASSERT(allocator && pool);
16144 
16145  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16146 
16147  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16148 
16149  return allocator->CheckPoolCorruption(pool);
16150 }
16151 
16152 VkResult vmaAllocateMemory(
16153  VmaAllocator allocator,
16154  const VkMemoryRequirements* pVkMemoryRequirements,
16155  const VmaAllocationCreateInfo* pCreateInfo,
16156  VmaAllocation* pAllocation,
16157  VmaAllocationInfo* pAllocationInfo)
16158 {
16159  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16160 
16161  VMA_DEBUG_LOG("vmaAllocateMemory");
16162 
16163  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16164 
16165  VkResult result = allocator->AllocateMemory(
16166  *pVkMemoryRequirements,
16167  false, // requiresDedicatedAllocation
16168  false, // prefersDedicatedAllocation
16169  VK_NULL_HANDLE, // dedicatedBuffer
16170  VK_NULL_HANDLE, // dedicatedImage
16171  *pCreateInfo,
16172  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16173  1, // allocationCount
16174  pAllocation);
16175 
16176 #if VMA_RECORDING_ENABLED
16177  if(allocator->GetRecorder() != VMA_NULL)
16178  {
16179  allocator->GetRecorder()->RecordAllocateMemory(
16180  allocator->GetCurrentFrameIndex(),
16181  *pVkMemoryRequirements,
16182  *pCreateInfo,
16183  *pAllocation);
16184  }
16185 #endif
16186 
16187  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16188  {
16189  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16190  }
16191 
16192  return result;
16193 }
16194 
16195 VkResult vmaAllocateMemoryPages(
16196  VmaAllocator allocator,
16197  const VkMemoryRequirements* pVkMemoryRequirements,
16198  const VmaAllocationCreateInfo* pCreateInfo,
16199  size_t allocationCount,
16200  VmaAllocation* pAllocations,
16201  VmaAllocationInfo* pAllocationInfo)
16202 {
16203  if(allocationCount == 0)
16204  {
16205  return VK_SUCCESS;
16206  }
16207 
16208  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16209 
16210  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16211 
16212  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16213 
16214  VkResult result = allocator->AllocateMemory(
16215  *pVkMemoryRequirements,
16216  false, // requiresDedicatedAllocation
16217  false, // prefersDedicatedAllocation
16218  VK_NULL_HANDLE, // dedicatedBuffer
16219  VK_NULL_HANDLE, // dedicatedImage
16220  *pCreateInfo,
16221  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16222  allocationCount,
16223  pAllocations);
16224 
16225 #if VMA_RECORDING_ENABLED
16226  if(allocator->GetRecorder() != VMA_NULL)
16227  {
16228  allocator->GetRecorder()->RecordAllocateMemoryPages(
16229  allocator->GetCurrentFrameIndex(),
16230  *pVkMemoryRequirements,
16231  *pCreateInfo,
16232  (uint64_t)allocationCount,
16233  pAllocations);
16234  }
16235 #endif
16236 
16237  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16238  {
16239  for(size_t i = 0; i < allocationCount; ++i)
16240  {
16241  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16242  }
16243  }
16244 
16245  return result;
16246 }
16247 
16248 VkResult vmaAllocateMemoryForBuffer(
16249  VmaAllocator allocator,
16250  VkBuffer buffer,
16251  const VmaAllocationCreateInfo* pCreateInfo,
16252  VmaAllocation* pAllocation,
16253  VmaAllocationInfo* pAllocationInfo)
16254 {
16255  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16256 
16257  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16258 
16259  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16260 
16261  VkMemoryRequirements vkMemReq = {};
16262  bool requiresDedicatedAllocation = false;
16263  bool prefersDedicatedAllocation = false;
16264  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16265  requiresDedicatedAllocation,
16266  prefersDedicatedAllocation);
16267 
16268  VkResult result = allocator->AllocateMemory(
16269  vkMemReq,
16270  requiresDedicatedAllocation,
16271  prefersDedicatedAllocation,
16272  buffer, // dedicatedBuffer
16273  VK_NULL_HANDLE, // dedicatedImage
16274  *pCreateInfo,
16275  VMA_SUBALLOCATION_TYPE_BUFFER,
16276  1, // allocationCount
16277  pAllocation);
16278 
16279 #if VMA_RECORDING_ENABLED
16280  if(allocator->GetRecorder() != VMA_NULL)
16281  {
16282  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16283  allocator->GetCurrentFrameIndex(),
16284  vkMemReq,
16285  requiresDedicatedAllocation,
16286  prefersDedicatedAllocation,
16287  *pCreateInfo,
16288  *pAllocation);
16289  }
16290 #endif
16291 
16292  if(pAllocationInfo && result == VK_SUCCESS)
16293  {
16294  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16295  }
16296 
16297  return result;
16298 }
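/*
Usage sketch (illustrative only): allocate for an existing VkBuffer, then bind
with vmaBindBufferMemory. Most applications can call vmaCreateBuffer instead,
which performs creation, allocation, and binding in one step:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    VmaAllocation alloc;
    VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, VMA_NULL);
    if(res == VK_SUCCESS)
        res = vmaBindBufferMemory(allocator, alloc, buf);
*/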
16299 
16300 VkResult vmaAllocateMemoryForImage(
16301  VmaAllocator allocator,
16302  VkImage image,
16303  const VmaAllocationCreateInfo* pCreateInfo,
16304  VmaAllocation* pAllocation,
16305  VmaAllocationInfo* pAllocationInfo)
16306 {
16307  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16308 
16309  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16310 
16311  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16312 
16313  VkMemoryRequirements vkMemReq = {};
16314  bool requiresDedicatedAllocation = false;
16315  bool prefersDedicatedAllocation = false;
16316  allocator->GetImageMemoryRequirements(image, vkMemReq,
16317  requiresDedicatedAllocation, prefersDedicatedAllocation);
16318 
16319  VkResult result = allocator->AllocateMemory(
16320  vkMemReq,
16321  requiresDedicatedAllocation,
16322  prefersDedicatedAllocation,
16323  VK_NULL_HANDLE, // dedicatedBuffer
16324  image, // dedicatedImage
16325  *pCreateInfo,
16326  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16327  1, // allocationCount
16328  pAllocation);
16329 
16330 #if VMA_RECORDING_ENABLED
16331  if(allocator->GetRecorder() != VMA_NULL)
16332  {
16333  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16334  allocator->GetCurrentFrameIndex(),
16335  vkMemReq,
16336  requiresDedicatedAllocation,
16337  prefersDedicatedAllocation,
16338  *pCreateInfo,
16339  *pAllocation);
16340  }
16341 #endif
16342 
16343  if(pAllocationInfo && result == VK_SUCCESS)
16344  {
16345  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16346  }
16347 
16348  return result;
16349 }
16350 
16351 void vmaFreeMemory(
16352  VmaAllocator allocator,
16353  VmaAllocation allocation)
16354 {
16355  VMA_ASSERT(allocator);
16356 
16357  if(allocation == VK_NULL_HANDLE)
16358  {
16359  return;
16360  }
16361 
16362  VMA_DEBUG_LOG("vmaFreeMemory");
16363 
16364  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16365 
16366 #if VMA_RECORDING_ENABLED
16367  if(allocator->GetRecorder() != VMA_NULL)
16368  {
16369  allocator->GetRecorder()->RecordFreeMemory(
16370  allocator->GetCurrentFrameIndex(),
16371  allocation);
16372  }
16373 #endif
16374 
16375  allocator->FreeMemory(
16376  1, // allocationCount
16377  &allocation);
16378 }
16379 
16380 void vmaFreeMemoryPages(
16381  VmaAllocator allocator,
16382  size_t allocationCount,
16383  VmaAllocation* pAllocations)
16384 {
16385  if(allocationCount == 0)
16386  {
16387  return;
16388  }
16389 
16390  VMA_ASSERT(allocator);
16391 
16392  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16393 
16394  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16395 
16396 #if VMA_RECORDING_ENABLED
16397  if(allocator->GetRecorder() != VMA_NULL)
16398  {
16399  allocator->GetRecorder()->RecordFreeMemoryPages(
16400  allocator->GetCurrentFrameIndex(),
16401  (uint64_t)allocationCount,
16402  pAllocations);
16403  }
16404 #endif
16405 
16406  allocator->FreeMemory(allocationCount, pAllocations);
16407 }
16408 
16409 VkResult vmaResizeAllocation(
16410  VmaAllocator allocator,
16411  VmaAllocation allocation,
16412  VkDeviceSize newSize)
16413 {
16414  VMA_ASSERT(allocator && allocation);
16415 
16416  VMA_DEBUG_LOG("vmaResizeAllocation");
16417 
16418  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16419 
16420 #if VMA_RECORDING_ENABLED
16421  if(allocator->GetRecorder() != VMA_NULL)
16422  {
16423  allocator->GetRecorder()->RecordResizeAllocation(
16424  allocator->GetCurrentFrameIndex(),
16425  allocation,
16426  newSize);
16427  }
16428 #endif
16429 
16430  return allocator->ResizeAllocation(allocation, newSize);
16431 }
16432 
16433 void vmaGetAllocationInfo(
16434  VmaAllocator allocator,
16435  VmaAllocation allocation,
16436  VmaAllocationInfo* pAllocationInfo)
16437 {
16438  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16439 
16440  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16441 
16442 #if VMA_RECORDING_ENABLED
16443  if(allocator->GetRecorder() != VMA_NULL)
16444  {
16445  allocator->GetRecorder()->RecordGetAllocationInfo(
16446  allocator->GetCurrentFrameIndex(),
16447  allocation);
16448  }
16449 #endif
16450 
16451  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16452 }
16453 
16454 VkBool32 vmaTouchAllocation(
16455  VmaAllocator allocator,
16456  VmaAllocation allocation)
16457 {
16458  VMA_ASSERT(allocator && allocation);
16459 
16460  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16461 
16462 #if VMA_RECORDING_ENABLED
16463  if(allocator->GetRecorder() != VMA_NULL)
16464  {
16465  allocator->GetRecorder()->RecordTouchAllocation(
16466  allocator->GetCurrentFrameIndex(),
16467  allocation);
16468  }
16469 #endif
16470 
16471  return allocator->TouchAllocation(allocation);
16472 }
16473 
16474 void vmaSetAllocationUserData(
16475  VmaAllocator allocator,
16476  VmaAllocation allocation,
16477  void* pUserData)
16478 {
16479  VMA_ASSERT(allocator && allocation);
16480 
16481  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16482 
16483  allocation->SetUserData(allocator, pUserData);
16484 
16485 #if VMA_RECORDING_ENABLED
16486  if(allocator->GetRecorder() != VMA_NULL)
16487  {
16488  allocator->GetRecorder()->RecordSetAllocationUserData(
16489  allocator->GetCurrentFrameIndex(),
16490  allocation,
16491  pUserData);
16492  }
16493 #endif
16494 }
16495 
16496 void vmaCreateLostAllocation(
16497  VmaAllocator allocator,
16498  VmaAllocation* pAllocation)
16499 {
16500  VMA_ASSERT(allocator && pAllocation);
16501 
16502  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16503 
16504  allocator->CreateLostAllocation(pAllocation);
16505 
16506 #if VMA_RECORDING_ENABLED
16507  if(allocator->GetRecorder() != VMA_NULL)
16508  {
16509  allocator->GetRecorder()->RecordCreateLostAllocation(
16510  allocator->GetCurrentFrameIndex(),
16511  *pAllocation);
16512  }
16513 #endif
16514 }
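
// [Editor's sketch - not part of the library source.] An allocation created
// by vmaCreateLostAllocation() behaves like a real allocation that has
// already become lost (e.g. one made with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT), which lets the rest of the
// code handle both cases uniformly.
static void ExampleLostAllocation(VmaAllocator allocator)
{
    VmaAllocation lostAlloc = VK_NULL_HANDLE;
    vmaCreateLostAllocation(allocator, &lostAlloc);

    // vmaTouchAllocation() always reports VK_FALSE for it.
    VkBool32 usable = vmaTouchAllocation(allocator, lostAlloc);
    (void)usable;

    vmaFreeMemory(allocator, lostAlloc); // Freeing a lost allocation is valid.
}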
16515 
16516 VkResult vmaMapMemory(
16517  VmaAllocator allocator,
16518  VmaAllocation allocation,
16519  void** ppData)
16520 {
16521  VMA_ASSERT(allocator && allocation && ppData);
16522 
16523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16524 
16525  VkResult res = allocator->Map(allocation, ppData);
16526 
16527 #if VMA_RECORDING_ENABLED
16528  if(allocator->GetRecorder() != VMA_NULL)
16529  {
16530  allocator->GetRecorder()->RecordMapMemory(
16531  allocator->GetCurrentFrameIndex(),
16532  allocation);
16533  }
16534 #endif
16535 
16536  return res;
16537 }
16538 
16539 void vmaUnmapMemory(
16540  VmaAllocator allocator,
16541  VmaAllocation allocation)
16542 {
16543  VMA_ASSERT(allocator && allocation);
16544 
16545  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16546 
16547 #if VMA_RECORDING_ENABLED
16548  if(allocator->GetRecorder() != VMA_NULL)
16549  {
16550  allocator->GetRecorder()->RecordUnmapMemory(
16551  allocator->GetCurrentFrameIndex(),
16552  allocation);
16553  }
16554 #endif
16555 
16556  allocator->Unmap(allocation);
16557 }
16558 
16559 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16560 {
16561  VMA_ASSERT(allocator && allocation);
16562 
16563  VMA_DEBUG_LOG("vmaFlushAllocation");
16564 
16565  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16566 
16567  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16568 
16569 #if VMA_RECORDING_ENABLED
16570  if(allocator->GetRecorder() != VMA_NULL)
16571  {
16572  allocator->GetRecorder()->RecordFlushAllocation(
16573  allocator->GetCurrentFrameIndex(),
16574  allocation, offset, size);
16575  }
16576 #endif
16577 }
16578 
16579 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16580 {
16581  VMA_ASSERT(allocator && allocation);
16582 
16583  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16584 
16585  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16586 
16587  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16588 
16589 #if VMA_RECORDING_ENABLED
16590  if(allocator->GetRecorder() != VMA_NULL)
16591  {
16592  allocator->GetRecorder()->RecordInvalidateAllocation(
16593  allocator->GetCurrentFrameIndex(),
16594  allocation, offset, size);
16595  }
16596 #endif
16597 }
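
// [Editor's sketch - not part of the library source.] The portable
// host-write pattern for a HOST_VISIBLE allocation: map, copy, flush, unmap.
// The flush is a no-op on HOST_COHERENT memory types but required on
// non-coherent ones, so calling it unconditionally is safe. The mirror
// pattern for reading data written by the GPU is vmaInvalidateAllocation()
// before the reads. Hypothetical helper:
static VkResult ExampleUpload(VmaAllocator allocator, VmaAllocation alloc,
    const void* pSrc, VkDeviceSize size)
{
    void* pData = nullptr;
    VkResult res = vmaMapMemory(allocator, alloc, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }
    memcpy(pData, pSrc, (size_t)size); // memcpy from <cstring>.
    vmaFlushAllocation(allocator, alloc, 0, size); // Offset is relative to the allocation.
    vmaUnmapMemory(allocator, alloc);
    return VK_SUCCESS;
}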
16598 
16599 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16600 {
16601  VMA_ASSERT(allocator);
16602 
16603  VMA_DEBUG_LOG("vmaCheckCorruption");
16604 
16605  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16606 
16607  return allocator->CheckCorruption(memoryTypeBits);
16608 }
16609 
16610 VkResult vmaDefragment(
16611  VmaAllocator allocator,
16612  VmaAllocation* pAllocations,
16613  size_t allocationCount,
16614  VkBool32* pAllocationsChanged,
16615  const VmaDefragmentationInfo *pDefragmentationInfo,
16616  VmaDefragmentationStats* pDefragmentationStats)
16617 {
16618  // Deprecated interface, reimplemented using new one.
16619 
16620  VmaDefragmentationInfo2 info2 = {};
16621  info2.allocationCount = (uint32_t)allocationCount;
16622  info2.pAllocations = pAllocations;
16623  info2.pAllocationsChanged = pAllocationsChanged;
16624  if(pDefragmentationInfo != VMA_NULL)
16625  {
16626  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16627  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16628  }
16629  else
16630  {
16631  info2.maxCpuAllocationsToMove = UINT32_MAX;
16632  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16633  }
16634  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16635 
16636  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
16637  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16638  if(res == VK_NOT_READY)
16639  {
16640  res = vmaDefragmentationEnd(allocator, ctx);
16641  }
16642  return res;
16643 }
16644 
16645 VkResult vmaDefragmentationBegin(
16646  VmaAllocator allocator,
16647  const VmaDefragmentationInfo2* pInfo,
16648  VmaDefragmentationStats* pStats,
16649  VmaDefragmentationContext *pContext)
16650 {
16651  VMA_ASSERT(allocator && pInfo && pContext);
16652 
16653  // Degenerate case: Nothing to defragment.
16654  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16655  {
16656  return VK_SUCCESS;
16657  }
16658 
16659  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16660  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16661  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16662  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16663 
16664  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16665 
16666  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16667 
16668  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16669 
16670 #if VMA_RECORDING_ENABLED
16671  if(allocator->GetRecorder() != VMA_NULL)
16672  {
16673  allocator->GetRecorder()->RecordDefragmentationBegin(
16674  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16675  }
16676 #endif
16677 
16678  return res;
16679 }
16680 
16681 VkResult vmaDefragmentationEnd(
16682  VmaAllocator allocator,
16683  VmaDefragmentationContext context)
16684 {
16685  VMA_ASSERT(allocator);
16686 
16687  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16688 
16689  if(context != VK_NULL_HANDLE)
16690  {
16691  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16692 
16693 #if VMA_RECORDING_ENABLED
16694  if(allocator->GetRecorder() != VMA_NULL)
16695  {
16696  allocator->GetRecorder()->RecordDefragmentationEnd(
16697  allocator->GetCurrentFrameIndex(), context);
16698  }
16699 #endif
16700 
16701  return allocator->DefragmentationEnd(context);
16702  }
16703  else
16704  {
16705  return VK_SUCCESS;
16706  }
16707 }
16708 
16709 VkResult vmaBindBufferMemory(
16710  VmaAllocator allocator,
16711  VmaAllocation allocation,
16712  VkBuffer buffer)
16713 {
16714  VMA_ASSERT(allocator && allocation && buffer);
16715 
16716  VMA_DEBUG_LOG("vmaBindBufferMemory");
16717 
16718  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16719 
16720  return allocator->BindBufferMemory(allocation, buffer);
16721 }
16722 
16723 VkResult vmaBindImageMemory(
16724  VmaAllocator allocator,
16725  VmaAllocation allocation,
16726  VkImage image)
16727 {
16728  VMA_ASSERT(allocator && allocation && image);
16729 
16730  VMA_DEBUG_LOG("vmaBindImageMemory");
16731 
16732  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16733 
16734  return allocator->BindImageMemory(allocation, image);
16735 }
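
// [Editor's sketch - not part of the library source.] The CPU-side
// defragmentation flow this patch documents: only allocations bound to
// buffers may take part. Moved allocations keep their handles but change
// their place in memory, so each affected VkBuffer must be destroyed and a
// fresh one created and bound with vmaBindBufferMemory(). `allocs`,
// `buffers`, `bufInfos` and `changed` are hypothetical caller-owned parallel
// arrays of `count` elements; none of the allocations may be in use by the
// GPU while this runs (see vmaDefragmentationBegin() documentation).
static VkResult ExampleDefragmentBuffers(
    VmaAllocator allocator, VkDevice device,
    VmaAllocation* allocs, VkBuffer* buffers,
    const VkBufferCreateInfo* bufInfos, VkBool32* changed, uint32_t count)
{
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = count;
    defragInfo.pAllocations = allocs;
    defragInfo.pAllocationsChanged = changed;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    // commandBuffer and the GPU limits stay zero: CPU-side moves only.

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &ctx);
    if(res == VK_NOT_READY)
    {
        // Nothing was recorded to a command buffer, so there is nothing to
        // submit or wait for - end the process immediately.
        res = vmaDefragmentationEnd(allocator, ctx);
    }
    if(res != VK_SUCCESS)
    {
        return res;
    }

    for(uint32_t i = 0; i < count; ++i)
    {
        if(changed[i] == VK_FALSE)
        {
            continue;
        }
        // The old buffer is immutably bound to the old memory region.
        vkDestroyBuffer(device, buffers[i], nullptr);
        res = vkCreateBuffer(device, &bufInfos[i], nullptr, &buffers[i]);
        if(res == VK_SUCCESS)
        {
            // Contents were already moved by the defragmentation itself.
            res = vmaBindBufferMemory(allocator, allocs[i], buffers[i]);
        }
        if(res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}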
16736 
16737 VkResult vmaCreateBuffer(
16738  VmaAllocator allocator,
16739  const VkBufferCreateInfo* pBufferCreateInfo,
16740  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16741  VkBuffer* pBuffer,
16742  VmaAllocation* pAllocation,
16743  VmaAllocationInfo* pAllocationInfo)
16744 {
16745  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16746 
16747  if(pBufferCreateInfo->size == 0)
16748  {
16749  return VK_ERROR_VALIDATION_FAILED_EXT;
16750  }
16751 
16752  VMA_DEBUG_LOG("vmaCreateBuffer");
16753 
16754  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16755 
16756  *pBuffer = VK_NULL_HANDLE;
16757  *pAllocation = VK_NULL_HANDLE;
16758 
16759  // 1. Create VkBuffer.
16760  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16761  allocator->m_hDevice,
16762  pBufferCreateInfo,
16763  allocator->GetAllocationCallbacks(),
16764  pBuffer);
16765  if(res >= 0)
16766  {
16767  // 2. vkGetBufferMemoryRequirements.
16768  VkMemoryRequirements vkMemReq = {};
16769  bool requiresDedicatedAllocation = false;
16770  bool prefersDedicatedAllocation = false;
16771  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16772  requiresDedicatedAllocation, prefersDedicatedAllocation);
16773 
16774  // Make sure alignment requirements for specific buffer usages reported
16775  // in Physical Device Properties are included in alignment reported by memory requirements.
16776  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16777  {
16778  VMA_ASSERT(vkMemReq.alignment %
16779  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16780  }
16781  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16782  {
16783  VMA_ASSERT(vkMemReq.alignment %
16784  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16785  }
16786  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16787  {
16788  VMA_ASSERT(vkMemReq.alignment %
16789  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16790  }
16791 
16792  // 3. Allocate memory using allocator.
16793  res = allocator->AllocateMemory(
16794  vkMemReq,
16795  requiresDedicatedAllocation,
16796  prefersDedicatedAllocation,
16797  *pBuffer, // dedicatedBuffer
16798  VK_NULL_HANDLE, // dedicatedImage
16799  *pAllocationCreateInfo,
16800  VMA_SUBALLOCATION_TYPE_BUFFER,
16801  1, // allocationCount
16802  pAllocation);
16803 
16804 #if VMA_RECORDING_ENABLED
16805  if(allocator->GetRecorder() != VMA_NULL)
16806  {
16807  allocator->GetRecorder()->RecordCreateBuffer(
16808  allocator->GetCurrentFrameIndex(),
16809  *pBufferCreateInfo,
16810  *pAllocationCreateInfo,
16811  *pAllocation);
16812  }
16813 #endif
16814 
16815  if(res >= 0)
16816  {
16817  // 4. Bind buffer with memory.
16818  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16819  {
16820  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16821  }
16822  if(res >= 0)
16823  {
16824  // All steps succeeded.
16825  #if VMA_STATS_STRING_ENABLED
16826  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16827  #endif
16828  if(pAllocationInfo != VMA_NULL)
16829  {
16830  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16831  }
16832 
16833  return VK_SUCCESS;
16834  }
16835  allocator->FreeMemory(
16836  1, // allocationCount
16837  pAllocation);
16838  *pAllocation = VK_NULL_HANDLE;
16839  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16840  *pBuffer = VK_NULL_HANDLE;
16841  return res;
16842  }
16843  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16844  *pBuffer = VK_NULL_HANDLE;
16845  return res;
16846  }
16847  return res;
16848 }
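
// [Editor's sketch - not part of the library source.] The intended
// high-level use of vmaCreateBuffer(): buffer, memory allocation, and
// binding in one call, undone by a single vmaDestroyBuffer(). Values are
// illustrative.
static VkResult ExampleCreateBuffer(VmaAllocator allocator)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, nullptr);
    if(res == VK_SUCCESS)
    {
        // ... use the buffer ...
        vmaDestroyBuffer(allocator, buf, alloc); // Destroys the buffer and frees its memory.
    }
    return res;
}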
16849 
16850 void vmaDestroyBuffer(
16851  VmaAllocator allocator,
16852  VkBuffer buffer,
16853  VmaAllocation allocation)
16854 {
16855  VMA_ASSERT(allocator);
16856 
16857  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16858  {
16859  return;
16860  }
16861 
16862  VMA_DEBUG_LOG("vmaDestroyBuffer");
16863 
16864  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16865 
16866 #if VMA_RECORDING_ENABLED
16867  if(allocator->GetRecorder() != VMA_NULL)
16868  {
16869  allocator->GetRecorder()->RecordDestroyBuffer(
16870  allocator->GetCurrentFrameIndex(),
16871  allocation);
16872  }
16873 #endif
16874 
16875  if(buffer != VK_NULL_HANDLE)
16876  {
16877  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16878  }
16879 
16880  if(allocation != VK_NULL_HANDLE)
16881  {
16882  allocator->FreeMemory(
16883  1, // allocationCount
16884  &allocation);
16885  }
16886 }
16887 
16888 VkResult vmaCreateImage(
16889  VmaAllocator allocator,
16890  const VkImageCreateInfo* pImageCreateInfo,
16891  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16892  VkImage* pImage,
16893  VmaAllocation* pAllocation,
16894  VmaAllocationInfo* pAllocationInfo)
16895 {
16896  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16897 
16898  if(pImageCreateInfo->extent.width == 0 ||
16899  pImageCreateInfo->extent.height == 0 ||
16900  pImageCreateInfo->extent.depth == 0 ||
16901  pImageCreateInfo->mipLevels == 0 ||
16902  pImageCreateInfo->arrayLayers == 0)
16903  {
16904  return VK_ERROR_VALIDATION_FAILED_EXT;
16905  }
16906 
16907  VMA_DEBUG_LOG("vmaCreateImage");
16908 
16909  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16910 
16911  *pImage = VK_NULL_HANDLE;
16912  *pAllocation = VK_NULL_HANDLE;
16913 
16914  // 1. Create VkImage.
16915  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16916  allocator->m_hDevice,
16917  pImageCreateInfo,
16918  allocator->GetAllocationCallbacks(),
16919  pImage);
16920  if(res >= 0)
16921  {
16922  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16923  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16924  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16925 
16926  // 2. Allocate memory using allocator.
16927  VkMemoryRequirements vkMemReq = {};
16928  bool requiresDedicatedAllocation = false;
16929  bool prefersDedicatedAllocation = false;
16930  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16931  requiresDedicatedAllocation, prefersDedicatedAllocation);
16932 
16933  res = allocator->AllocateMemory(
16934  vkMemReq,
16935  requiresDedicatedAllocation,
16936  prefersDedicatedAllocation,
16937  VK_NULL_HANDLE, // dedicatedBuffer
16938  *pImage, // dedicatedImage
16939  *pAllocationCreateInfo,
16940  suballocType,
16941  1, // allocationCount
16942  pAllocation);
16943 
16944 #if VMA_RECORDING_ENABLED
16945  if(allocator->GetRecorder() != VMA_NULL)
16946  {
16947  allocator->GetRecorder()->RecordCreateImage(
16948  allocator->GetCurrentFrameIndex(),
16949  *pImageCreateInfo,
16950  *pAllocationCreateInfo,
16951  *pAllocation);
16952  }
16953 #endif
16954 
16955  if(res >= 0)
16956  {
16957  // 3. Bind image with memory.
16958  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16959  {
16960  res = allocator->BindImageMemory(*pAllocation, *pImage);
16961  }
16962  if(res >= 0)
16963  {
16964  // All steps succeeded.
16965  #if VMA_STATS_STRING_ENABLED
16966  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16967  #endif
16968  if(pAllocationInfo != VMA_NULL)
16969  {
16970  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16971  }
16972 
16973  return VK_SUCCESS;
16974  }
16975  allocator->FreeMemory(
16976  1, // allocationCount
16977  pAllocation);
16978  *pAllocation = VK_NULL_HANDLE;
16979  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16980  *pImage = VK_NULL_HANDLE;
16981  return res;
16982  }
16983  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16984  *pImage = VK_NULL_HANDLE;
16985  return res;
16986  }
16987  return res;
16988 }
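
// [Editor's sketch - not part of the library source.] The same pattern for
// an image: a GPU-only, optimally tiled 2D texture. Per the documentation
// change in this patch, allocations bound to images must not be passed to
// defragmentation.
static VkResult ExampleCreateImage(VmaAllocator allocator)
{
    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        &image, &alloc, nullptr);
    if(res == VK_SUCCESS)
    {
        // ... transition layout, upload, sample ...
        vmaDestroyImage(allocator, image, alloc);
    }
    return res;
}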
16989 
16990 void vmaDestroyImage(
16991  VmaAllocator allocator,
16992  VkImage image,
16993  VmaAllocation allocation)
16994 {
16995  VMA_ASSERT(allocator);
16996 
16997  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16998  {
16999  return;
17000  }
17001 
17002  VMA_DEBUG_LOG("vmaDestroyImage");
17003 
17004  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17005 
17006 #if VMA_RECORDING_ENABLED
17007  if(allocator->GetRecorder() != VMA_NULL)
17008  {
17009  allocator->GetRecorder()->RecordDestroyImage(
17010  allocator->GetCurrentFrameIndex(),
17011  allocation);
17012  }
17013 #endif
17014 
17015  if(image != VK_NULL_HANDLE)
17016  {
17017  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17018  }
17019  if(allocation != VK_NULL_HANDLE)
17020  {
17021  allocator->FreeMemory(
17022  1, // allocationCount
17023  &allocation);
17024  }
17025 }
17026 
17027 #endif // #ifdef VMA_IMPLEMENTATION