From 7a6e442b66ef2a62972472c5c783e89d33e75641 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Thu, 8 Nov 2018 18:46:04 +0100 Subject: [PATCH] Fixes in documentation: "Features not supported". --- docs/html/general_considerations.html | 4 +- docs/html/search/variables_c.html | 30 +++ docs/html/search/variables_c.js | 20 ++ docs/html/vk__mem__alloc_8h_source.html | 260 ++++++++++++------------ src/vk_mem_alloc.h | 11 +- 5 files changed, 193 insertions(+), 132 deletions(-) create mode 100644 docs/html/search/variables_c.html create mode 100644 docs/html/search/variables_c.js diff --git a/docs/html/general_considerations.html b/docs/html/general_considerations.html index c1bf26f..3dc7017 100644 --- a/docs/html/general_considerations.html +++ b/docs/html/general_considerations.html @@ -110,10 +110,12 @@ Allocation algorithm Features not supported

Features deliberately excluded from the scope of this library:

diff --git a/docs/html/search/variables_c.html b/docs/html/search/variables_c.html new file mode 100644 index 0000000..75709df --- /dev/null +++ b/docs/html/search/variables_c.html @@ -0,0 +1,30 @@ (new file: standard Doxygen search-results page boilerplate, i.e. the "Loading...", "Searching...", and "No Matches" widget markup)
+ + diff --git a/docs/html/search/variables_c.js b/docs/html/search/variables_c.js new file mode 100644 index 0000000..e7b9e7f --- /dev/null +++ b/docs/html/search/variables_c.js @@ -0,0 +1,20 @@ +var searchData= +[ + ['vkallocatememory',['vkAllocateMemory',['../struct_vma_vulkan_functions.html#a2943bf99dfd784a0e8f599d987e22e6c',1,'VmaVulkanFunctions']]], + ['vkbindbuffermemory',['vkBindBufferMemory',['../struct_vma_vulkan_functions.html#a94fc4f3a605d9880bb3c0ba2c2fc80b2',1,'VmaVulkanFunctions']]], + ['vkbindimagememory',['vkBindImageMemory',['../struct_vma_vulkan_functions.html#a1338d96a128a5ade648b8d934907c637',1,'VmaVulkanFunctions']]], + ['vkcmdcopybuffer',['vkCmdCopyBuffer',['../struct_vma_vulkan_functions.html#ae5c0db8c89a3b82593dc16aa6a49fa3a',1,'VmaVulkanFunctions']]], + ['vkcreatebuffer',['vkCreateBuffer',['../struct_vma_vulkan_functions.html#ae8084315a25006271a2edfc3a447519f',1,'VmaVulkanFunctions']]], + ['vkcreateimage',['vkCreateImage',['../struct_vma_vulkan_functions.html#a23ebe70be515b9b5010a1d691200e325',1,'VmaVulkanFunctions']]], + ['vkdestroybuffer',['vkDestroyBuffer',['../struct_vma_vulkan_functions.html#a7e054606faddb07f0e8556f3ed317d45',1,'VmaVulkanFunctions']]], + ['vkdestroyimage',['vkDestroyImage',['../struct_vma_vulkan_functions.html#a90b898227039b1dcb3520f6e91f09ffa',1,'VmaVulkanFunctions']]], + ['vkflushmappedmemoryranges',['vkFlushMappedMemoryRanges',['../struct_vma_vulkan_functions.html#a33c322f4c4ad2810f8a9c97a277572f9',1,'VmaVulkanFunctions']]], + ['vkfreememory',['vkFreeMemory',['../struct_vma_vulkan_functions.html#a4c658701778564d62034255b5dda91b4',1,'VmaVulkanFunctions']]], + ['vkgetbuffermemoryrequirements',['vkGetBufferMemoryRequirements',['../struct_vma_vulkan_functions.html#a5b92901df89a4194b0d12f6071d4d143',1,'VmaVulkanFunctions']]], + ['vkgetimagememoryrequirements',['vkGetImageMemoryRequirements',['../struct_vma_vulkan_functions.html#a475f6f49f8debe4d10800592606d53f4',1,'VmaVulkanFunctions']]], + ['vkgetphysicaldevicememoryproperties',['vkGetPhysicalDeviceMemoryProperties',['../struct_vma_vulkan_functions.html#a60d25c33bba06bb8592e6875cbaa9830',1,'VmaVulkanFunctions']]], + ['vkgetphysicaldeviceproperties',['vkGetPhysicalDeviceProperties',['../struct_vma_vulkan_functions.html#a77b7a74082823e865dd6546623468f96',1,'VmaVulkanFunctions']]], + ['vkinvalidatemappedmemoryranges',['vkInvalidateMappedMemoryRanges',['../struct_vma_vulkan_functions.html#a5c1093bc32386a8060c37c9f282078a1',1,'VmaVulkanFunctions']]], + ['vkmapmemory',['vkMapMemory',['../struct_vma_vulkan_functions.html#ab5c1f38dea3a2cf00dc9eb4f57218c49',1,'VmaVulkanFunctions']]], + ['vkunmapmemory',['vkUnmapMemory',['../struct_vma_vulkan_functions.html#acc798589736f0becb317fc2196c1d8b9',1,'VmaVulkanFunctions']]] +]; diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index d3e0ccf..b5e44ad 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,188 +65,188 @@ $(function() {
vk_mem_alloc.h
-Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1468 /*
1469 Define this macro to 0/1 to disable/enable support for recording functionality,
1470 available through VmaAllocatorCreateInfo::pRecordSettings.
1471 */
1472 #ifndef VMA_RECORDING_ENABLED
1473  #ifdef _WIN32
1474  #define VMA_RECORDING_ENABLED 1
1475  #else
1476  #define VMA_RECORDING_ENABLED 0
1477  #endif
1478 #endif
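// Example (an illustrative sketch, not part of the original header): to force
// recording support off even on Windows, define the macro before the #include:
//
//   #define VMA_RECORDING_ENABLED 0
//   #include "vk_mem_alloc.h"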
1479 
1480 #ifndef NOMINMAX
1481  #define NOMINMAX // For windows.h
1482 #endif
1483 
1484 #include <vulkan/vulkan.h>
1485 
1486 #if VMA_RECORDING_ENABLED
1487  #include <windows.h>
1488 #endif
1489 
1490 #if !defined(VMA_DEDICATED_ALLOCATION)
1491  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1492  #define VMA_DEDICATED_ALLOCATION 1
1493  #else
1494  #define VMA_DEDICATED_ALLOCATION 0
1495  #endif
1496 #endif
1497 
1507 VK_DEFINE_HANDLE(VmaAllocator)
1508 
1509 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1511  VmaAllocator allocator,
1512  uint32_t memoryType,
1513  VkDeviceMemory memory,
1514  VkDeviceSize size);
1516 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1517  VmaAllocator allocator,
1518  uint32_t memoryType,
1519  VkDeviceMemory memory,
1520  VkDeviceSize size);
1521 
1535 
1565 
1568 typedef VkFlags VmaAllocatorCreateFlags;
1569 
1574 typedef struct VmaVulkanFunctions {
1575  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1576  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1577  PFN_vkAllocateMemory vkAllocateMemory;
1578  PFN_vkFreeMemory vkFreeMemory;
1579  PFN_vkMapMemory vkMapMemory;
1580  PFN_vkUnmapMemory vkUnmapMemory;
1581  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1582  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1583  PFN_vkBindBufferMemory vkBindBufferMemory;
1584  PFN_vkBindImageMemory vkBindImageMemory;
1585  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1586  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1587  PFN_vkCreateBuffer vkCreateBuffer;
1588  PFN_vkDestroyBuffer vkDestroyBuffer;
1589  PFN_vkCreateImage vkCreateImage;
1590  PFN_vkDestroyImage vkDestroyImage;
1591 #if VMA_DEDICATED_ALLOCATION
1592  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1593  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1594 #endif
1595 } VmaVulkanFunctions;
1596 
1598 typedef enum VmaRecordFlagBits {
1604 } VmaRecordFlagBits;
1605 
1608 typedef VkFlags VmaRecordFlags;
1609 
1611 typedef struct VmaRecordSettings
1612 {
1622  const char* pFilePath;
1623 } VmaRecordSettings;
1624 
1626 typedef struct VmaAllocatorCreateInfo
1627 {
1631 
1632  VkPhysicalDevice physicalDevice;
1634 
1635  VkDevice device;
1637 
1640 
1641  const VkAllocationCallbacks* pAllocationCallbacks;
1643 
1682  const VkDeviceSize* pHeapSizeLimit;
1702 } VmaAllocatorCreateInfo;
1703 
1705 VkResult vmaCreateAllocator(
1706  const VmaAllocatorCreateInfo* pCreateInfo,
1707  VmaAllocator* pAllocator);
1708 
1710 void vmaDestroyAllocator(
1711  VmaAllocator allocator);
1712 
1717 void vmaGetPhysicalDeviceProperties(
1718  VmaAllocator allocator,
1719  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1720 
1725 void vmaGetMemoryProperties(
1726  VmaAllocator allocator,
1727  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1728 
1735 void vmaGetMemoryTypeProperties(
1736  VmaAllocator allocator,
1737  uint32_t memoryTypeIndex,
1738  VkMemoryPropertyFlags* pFlags);
1739 
1748 void vmaSetCurrentFrameIndex(
1749  VmaAllocator allocator,
1750  uint32_t frameIndex);
1751 
1754 typedef struct VmaStatInfo
1755 {
1757  uint32_t blockCount;
1763  VkDeviceSize usedBytes;
1765  VkDeviceSize unusedBytes;
1768 } VmaStatInfo;
1769 
1771 typedef struct VmaStats
1772 {
1773  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1774  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1776 } VmaStats;
1777 
1779 void vmaCalculateStats(
1780  VmaAllocator allocator,
1781  VmaStats* pStats);
1782 
1783 #define VMA_STATS_STRING_ENABLED 1
1784 
1785 #if VMA_STATS_STRING_ENABLED
1786 
1788 
1790 void vmaBuildStatsString(
1791  VmaAllocator allocator,
1792  char** ppStatsString,
1793  VkBool32 detailedMap);
1794 
1795 void vmaFreeStatsString(
1796  VmaAllocator allocator,
1797  char* pStatsString);
1798 
1799 #endif // #if VMA_STATS_STRING_ENABLED
1800 
1809 VK_DEFINE_HANDLE(VmaPool)
1810 
1811 typedef enum VmaMemoryUsage
1812 {
1861 } VmaMemoryUsage;
1862 
1877 
1932 
1945 
1955 
1962 
1966 
1967 typedef struct VmaAllocationCreateInfo
1968 {
1981  VkMemoryPropertyFlags requiredFlags;
1986  VkMemoryPropertyFlags preferredFlags;
1994  uint32_t memoryTypeBits;
2007  void* pUserData;
2008 } VmaAllocationCreateInfo;
2009 
2026 VkResult vmaFindMemoryTypeIndex(
2027  VmaAllocator allocator,
2028  uint32_t memoryTypeBits,
2029  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2030  uint32_t* pMemoryTypeIndex);
2031 
2044 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2045  VmaAllocator allocator,
2046  const VkBufferCreateInfo* pBufferCreateInfo,
2047  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2048  uint32_t* pMemoryTypeIndex);
2049 
2062 VkResult vmaFindMemoryTypeIndexForImageInfo(
2063  VmaAllocator allocator,
2064  const VkImageCreateInfo* pImageCreateInfo,
2065  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2066  uint32_t* pMemoryTypeIndex);
2067 
2088 
2105 
2116 
2122 
2125 typedef VkFlags VmaPoolCreateFlags;
2126 
2129 typedef struct VmaPoolCreateInfo {
2144  VkDeviceSize blockSize;
2172 } VmaPoolCreateInfo;
2173 
2176 typedef struct VmaPoolStats {
2179  VkDeviceSize size;
2182  VkDeviceSize unusedSize;
2195  VkDeviceSize unusedRangeSizeMax;
2198  size_t blockCount;
2199 } VmaPoolStats;
2200 
2207 VkResult vmaCreatePool(
2208  VmaAllocator allocator,
2209  const VmaPoolCreateInfo* pCreateInfo,
2210  VmaPool* pPool);
2211 
2214 void vmaDestroyPool(
2215  VmaAllocator allocator,
2216  VmaPool pool);
2217 
2224 void vmaGetPoolStats(
2225  VmaAllocator allocator,
2226  VmaPool pool,
2227  VmaPoolStats* pPoolStats);
2228 
2235 void vmaMakePoolAllocationsLost(
2236  VmaAllocator allocator,
2237  VmaPool pool,
2238  size_t* pLostAllocationCount);
2239 
2254 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2255 
2280 VK_DEFINE_HANDLE(VmaAllocation)
2281 
2282 
2284 typedef struct VmaAllocationInfo {
2289  uint32_t memoryType;
2298  VkDeviceMemory deviceMemory;
2303  VkDeviceSize offset;
2308  VkDeviceSize size;
2322  void* pUserData;
2323 } VmaAllocationInfo;
2324 
2335 VkResult vmaAllocateMemory(
2336  VmaAllocator allocator,
2337  const VkMemoryRequirements* pVkMemoryRequirements,
2338  const VmaAllocationCreateInfo* pCreateInfo,
2339  VmaAllocation* pAllocation,
2340  VmaAllocationInfo* pAllocationInfo);
2341 
2348 VkResult vmaAllocateMemoryForBuffer(
2349  VmaAllocator allocator,
2350  VkBuffer buffer,
2351  const VmaAllocationCreateInfo* pCreateInfo,
2352  VmaAllocation* pAllocation,
2353  VmaAllocationInfo* pAllocationInfo);
2354 
2356 VkResult vmaAllocateMemoryForImage(
2357  VmaAllocator allocator,
2358  VkImage image,
2359  const VmaAllocationCreateInfo* pCreateInfo,
2360  VmaAllocation* pAllocation,
2361  VmaAllocationInfo* pAllocationInfo);
2362 
2364 void vmaFreeMemory(
2365  VmaAllocator allocator,
2366  VmaAllocation allocation);
2367 
2384 void vmaGetAllocationInfo(
2385  VmaAllocator allocator,
2386  VmaAllocation allocation,
2387  VmaAllocationInfo* pAllocationInfo);
2388 
2403 VkBool32 vmaTouchAllocation(
2404  VmaAllocator allocator,
2405  VmaAllocation allocation);
2406 
2420 void vmaSetAllocationUserData(
2421  VmaAllocator allocator,
2422  VmaAllocation allocation,
2423  void* pUserData);
2424 
2435 void vmaCreateLostAllocation(
2436  VmaAllocator allocator,
2437  VmaAllocation* pAllocation);
2438 
2473 VkResult vmaMapMemory(
2474  VmaAllocator allocator,
2475  VmaAllocation allocation,
2476  void** ppData);
2477 
2482 void vmaUnmapMemory(
2483  VmaAllocator allocator,
2484  VmaAllocation allocation);
2485 
2498 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2499 
2512 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2513 
2530 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2531 
2533 typedef struct VmaDefragmentationInfo {
2538  VkDeviceSize maxBytesToMove;
2544 } VmaDefragmentationInfo;
2545 
2547 typedef struct VmaDefragmentationStats {
2549  VkDeviceSize bytesMoved;
2551  VkDeviceSize bytesFreed;
2556 } VmaDefragmentationStats;
2557 
2596 VkResult vmaDefragment(
2597  VmaAllocator allocator,
2598  VmaAllocation* pAllocations,
2599  size_t allocationCount,
2600  VkBool32* pAllocationsChanged,
2601  const VmaDefragmentationInfo *pDefragmentationInfo,
2602  VmaDefragmentationStats* pDefragmentationStats);
2603 
2616 VkResult vmaBindBufferMemory(
2617  VmaAllocator allocator,
2618  VmaAllocation allocation,
2619  VkBuffer buffer);
2620 
2633 VkResult vmaBindImageMemory(
2634  VmaAllocator allocator,
2635  VmaAllocation allocation,
2636  VkImage image);
2637 
2664 VkResult vmaCreateBuffer(
2665  VmaAllocator allocator,
2666  const VkBufferCreateInfo* pBufferCreateInfo,
2667  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2668  VkBuffer* pBuffer,
2669  VmaAllocation* pAllocation,
2670  VmaAllocationInfo* pAllocationInfo);
2671 
2683 void vmaDestroyBuffer(
2684  VmaAllocator allocator,
2685  VkBuffer buffer,
2686  VmaAllocation allocation);
2687 
2689 VkResult vmaCreateImage(
2690  VmaAllocator allocator,
2691  const VkImageCreateInfo* pImageCreateInfo,
2692  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2693  VkImage* pImage,
2694  VmaAllocation* pAllocation,
2695  VmaAllocationInfo* pAllocationInfo);
2696 
2708 void vmaDestroyImage(
2709  VmaAllocator allocator,
2710  VkImage image,
2711  VmaAllocation allocation);
2712 
2713 #ifdef __cplusplus
2714 }
2715 #endif
2716 
2717 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2718 
2719 // For Visual Studio IntelliSense.
2720 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2721 #define VMA_IMPLEMENTATION
2722 #endif
2723 
2724 #ifdef VMA_IMPLEMENTATION
2725 #undef VMA_IMPLEMENTATION
2726 
2727 #include <cstdint>
2728 #include <cstdlib>
2729 #include <cstring>
2730 
2731 /*******************************************************************************
2732 CONFIGURATION SECTION
2733 
2734 Define some of these macros before each #include of this header or change them
2735 here if you need other than the default behavior, depending on your environment.
2736 */
2737 
2738 /*
2739 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2740 internally, like:
2741 
2742  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2743 
2744 Define to 0 if you are going to provide your own pointers to Vulkan functions via
2745 VmaAllocatorCreateInfo::pVulkanFunctions.
2746 */
2747 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2748 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2749 #endif
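/*
Example (a hedged sketch, not part of the original header): with
VMA_STATIC_VULKAN_FUNCTIONS defined to 0, the pointers can be supplied manually
through VmaAllocatorCreateInfo::pVulkanFunctions instead. The variable names
below (physicalDevice, device) are placeholders for your own handles.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    // ...fill the remaining members of VmaVulkanFunctions the same way...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
*/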
2750 
2751 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2752 //#define VMA_USE_STL_CONTAINERS 1
2753 
2754 /* Set this macro to 1 to make the library include and use STL containers:
2755 std::pair, std::vector, std::list, std::unordered_map.
2756 
2757 Set it to 0 or leave it undefined to make the library use its own implementations of
2758 these containers.
2759 */
2760 #if VMA_USE_STL_CONTAINERS
2761  #define VMA_USE_STL_VECTOR 1
2762  #define VMA_USE_STL_UNORDERED_MAP 1
2763  #define VMA_USE_STL_LIST 1
2764 #endif
2765 
2766 #if VMA_USE_STL_VECTOR
2767  #include <vector>
2768 #endif
2769 
2770 #if VMA_USE_STL_UNORDERED_MAP
2771  #include <unordered_map>
2772 #endif
2773 
2774 #if VMA_USE_STL_LIST
2775  #include <list>
2776 #endif
2777 
2778 /*
2779 The following headers are used in this CONFIGURATION section only, so feel free to
2780 remove them if not needed.
2781 */
2782 #include <cassert> // for assert
2783 #include <algorithm> // for min, max
2784 #include <mutex> // for std::mutex
2785 #include <atomic> // for std::atomic
2786 
2787 #ifndef VMA_NULL
2788  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2789  #define VMA_NULL nullptr
2790 #endif
2791 
2792 #if defined(__APPLE__) || defined(__ANDROID__)
2793 #include <cstdlib>
2794 void *aligned_alloc(size_t alignment, size_t size)
2795 {
2796  // alignment must be >= sizeof(void*)
2797  if(alignment < sizeof(void*))
2798  {
2799  alignment = sizeof(void*);
2800  }
2801 
2802  void *pointer;
2803  if(posix_memalign(&pointer, alignment, size) == 0)
2804  return pointer;
2805  return VMA_NULL;
2806 }
2807 #endif
2808 
2809 // If your compiler is not compatible with C++11 and the definition of the
2810 // aligned_alloc() function is missing, uncommenting the following line may help:
2811 
2812 //#include <malloc.h>
2813 
2814 // Normal assert to check for programmer's errors, especially in Debug configuration.
2815 #ifndef VMA_ASSERT
2816  #ifdef _DEBUG
2817  #define VMA_ASSERT(expr) assert(expr)
2818  #else
2819  #define VMA_ASSERT(expr)
2820  #endif
2821 #endif
2822 
2823 // Assert that will be called very often, like inside data structures e.g. operator[].
2824 // Making it non-empty can make the program slow.
2825 #ifndef VMA_HEAVY_ASSERT
2826  #ifdef _DEBUG
2827  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2828  #else
2829  #define VMA_HEAVY_ASSERT(expr)
2830  #endif
2831 #endif
2832 
2833 #ifndef VMA_ALIGN_OF
2834  #define VMA_ALIGN_OF(type) (__alignof(type))
2835 #endif
2836 
2837 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2838  #if defined(_WIN32)
2839  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2840  #else
2841  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
2842  #endif
2843 #endif
2844 
2845 #ifndef VMA_SYSTEM_FREE
2846  #if defined(_WIN32)
2847  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2848  #else
2849  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2850  #endif
2851 #endif
2852 
2853 #ifndef VMA_MIN
2854  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2855 #endif
2856 
2857 #ifndef VMA_MAX
2858  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2859 #endif
2860 
2861 #ifndef VMA_SWAP
2862  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2863 #endif
2864 
2865 #ifndef VMA_SORT
2866  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2867 #endif
2868 
2869 #ifndef VMA_DEBUG_LOG
2870  #define VMA_DEBUG_LOG(format, ...)
2871  /*
2872  #define VMA_DEBUG_LOG(format, ...) do { \
2873  printf(format, __VA_ARGS__); \
2874  printf("\n"); \
2875  } while(false)
2876  */
2877 #endif
2878 
2879 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2880 #if VMA_STATS_STRING_ENABLED
2881  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2882  {
2883  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2884  }
2885  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2886  {
2887  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2888  }
2889  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2890  {
2891  snprintf(outStr, strLen, "%p", ptr);
2892  }
2893 #endif
2894 
2895 #ifndef VMA_MUTEX
2896  class VmaMutex
2897  {
2898  public:
2899  VmaMutex() { }
2900  ~VmaMutex() { }
2901  void Lock() { m_Mutex.lock(); }
2902  void Unlock() { m_Mutex.unlock(); }
2903  private:
2904  std::mutex m_Mutex;
2905  };
2906  #define VMA_MUTEX VmaMutex
2907 #endif
2908 
2909 /*
2910 If providing your own implementation, you need to implement a subset of std::atomic:
2911 
2912 - Constructor(uint32_t desired)
2913 - uint32_t load() const
2914 - void store(uint32_t desired)
2915 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2916 */
2917 #ifndef VMA_ATOMIC_UINT32
2918  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2919 #endif
2920 
2921 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2922 
2926  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2927 #endif
2928 
2929 #ifndef VMA_DEBUG_ALIGNMENT
2930 
2934  #define VMA_DEBUG_ALIGNMENT (1)
2935 #endif
2936 
2937 #ifndef VMA_DEBUG_MARGIN
2938 
2942  #define VMA_DEBUG_MARGIN (0)
2943 #endif
2944 
2945 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2946 
2950  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2951 #endif
2952 
2953 #ifndef VMA_DEBUG_DETECT_CORRUPTION
2954 
2959  #define VMA_DEBUG_DETECT_CORRUPTION (0)
2960 #endif
2961 
2962 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2963 
2967  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2968 #endif
2969 
2970 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2971 
2975  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2976 #endif
2977 
2978 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2979  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2981 #endif
2982 
2983 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2984  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2986 #endif
2987 
2988 #ifndef VMA_CLASS_NO_COPY
2989  #define VMA_CLASS_NO_COPY(className) \
2990  private: \
2991  className(const className&) = delete; \
2992  className& operator=(const className&) = delete;
2993 #endif
2994 
2995 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2996 
2997 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
2998 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
2999 
3000 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3001 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3002 
3003 /*******************************************************************************
3004 END OF CONFIGURATION
3005 */
3006 
3007 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3008  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3009 
3010 // Returns number of bits set to 1 in (v).
3011 static inline uint32_t VmaCountBitsSet(uint32_t v)
3012 {
3013  uint32_t c = v - ((v >> 1) & 0x55555555);
3014  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3015  c = ((c >> 4) + c) & 0x0F0F0F0F;
3016  c = ((c >> 8) + c) & 0x00FF00FF;
3017  c = ((c >> 16) + c) & 0x0000FFFF;
3018  return c;
3019 }
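// Worked example (illustrative, not part of the original header):
// VmaCountBitsSet(11) == 3, because 11 is binary 1011.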
3020 
3021 // Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3022 // Use types like uint32_t, uint64_t as T.
3023 template <typename T>
3024 static inline T VmaAlignUp(T val, T align)
3025 {
3026  return (val + align - 1) / align * align;
3027 }
3028 // Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3029 // Use types like uint32_t, uint64_t as T.
3030 template <typename T>
3031 static inline T VmaAlignDown(T val, T align)
3032 {
3033  return val / align * align;
3034 }
3035 
3036 // Division with mathematical rounding to nearest number.
3037 template <typename T>
3038 static inline T VmaRoundDiv(T x, T y)
3039 {
3040  return (x + (y / (T)2)) / y;
3041 }
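// Illustrative examples (not part of the original header):
// VmaRoundDiv<uint32_t>(10, 4) == 3 (2.5 rounds up),
// VmaRoundDiv<uint32_t>(9, 4) == 2 (2.25 rounds down).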
3042 
3043 /*
3044 Returns true if given number is a power of two.
3045 T must be an unsigned integer type, or a signed integer that is always nonnegative.
3046 For 0 returns true.
3047 */
3048 template <typename T>
3049 inline bool VmaIsPow2(T x)
3050 {
3051  return (x & (x-1)) == 0;
3052 }
3053 
3054 // Returns smallest power of 2 greater than or equal to v.
3055 static inline uint32_t VmaNextPow2(uint32_t v)
3056 {
3057  v--;
3058  v |= v >> 1;
3059  v |= v >> 2;
3060  v |= v >> 4;
3061  v |= v >> 8;
3062  v |= v >> 16;
3063  v++;
3064  return v;
3065 }
3066 static inline uint64_t VmaNextPow2(uint64_t v)
3067 {
3068  v--;
3069  v |= v >> 1;
3070  v |= v >> 2;
3071  v |= v >> 4;
3072  v |= v >> 8;
3073  v |= v >> 16;
3074  v |= v >> 32;
3075  v++;
3076  return v;
3077 }
3078 
3079 // Returns largest power of 2 less than or equal to v.
3080 static inline uint32_t VmaPrevPow2(uint32_t v)
3081 {
3082  v |= v >> 1;
3083  v |= v >> 2;
3084  v |= v >> 4;
3085  v |= v >> 8;
3086  v |= v >> 16;
3087  v = v ^ (v >> 1);
3088  return v;
3089 }
3090 static inline uint64_t VmaPrevPow2(uint64_t v)
3091 {
3092  v |= v >> 1;
3093  v |= v >> 2;
3094  v |= v >> 4;
3095  v |= v >> 8;
3096  v |= v >> 16;
3097  v |= v >> 32;
3098  v = v ^ (v >> 1);
3099  return v;
3100 }
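// Illustrative examples (not part of the original header):
// VmaNextPow2(17u) == 32, VmaNextPow2(32u) == 32;
// VmaPrevPow2(17u) == 16, VmaPrevPow2(32u) == 32.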
3101 
3102 static inline bool VmaStrIsEmpty(const char* pStr)
3103 {
3104  return pStr == VMA_NULL || *pStr == '\0';
3105 }
3106 
3107 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3108 {
3109  switch(algorithm)
3110  {
3112  return "Linear";
3114  return "Buddy";
3115  case 0:
3116  return "Default";
3117  default:
3118  VMA_ASSERT(0);
3119  return "";
3120  }
3121 }
3122 
3123 #ifndef VMA_SORT
3124 
3125 template<typename Iterator, typename Compare>
3126 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3127 {
3128  Iterator centerValue = end; --centerValue;
3129  Iterator insertIndex = beg;
3130  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3131  {
3132  if(cmp(*memTypeIndex, *centerValue))
3133  {
3134  if(insertIndex != memTypeIndex)
3135  {
3136  VMA_SWAP(*memTypeIndex, *insertIndex);
3137  }
3138  ++insertIndex;
3139  }
3140  }
3141  if(insertIndex != centerValue)
3142  {
3143  VMA_SWAP(*insertIndex, *centerValue);
3144  }
3145  return insertIndex;
3146 }
3147 
3148 template<typename Iterator, typename Compare>
3149 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3150 {
3151  if(beg < end)
3152  {
3153  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3154  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3155  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3156  }
3157 }
3158 
3159 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3160 
3161 #endif // #ifndef VMA_SORT
3162 
3163 /*
3164 Returns true if two memory blocks occupy overlapping pages.
3165 ResourceA must be at a lower memory offset than ResourceB.
3166 
3167 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3168 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3169 */
3170 static inline bool VmaBlocksOnSamePage(
3171  VkDeviceSize resourceAOffset,
3172  VkDeviceSize resourceASize,
3173  VkDeviceSize resourceBOffset,
3174  VkDeviceSize pageSize)
3175 {
3176  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3177  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3178  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3179  VkDeviceSize resourceBStart = resourceBOffset;
3180  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3181  return resourceAEndPage == resourceBStartPage;
3182 }
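// Worked example (illustrative, not part of the original header; the masking
// above assumes pageSize is a power of two): with pageSize = 4096,
// resourceAOffset = 0 and resourceASize = 100, resourceAEnd = 99 lies in page 0.
// resourceBOffset = 4096 starts page 4096, so the function returns false;
// resourceBOffset = 2048 stays in page 0, so it returns true.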
3183 
3184 enum VmaSuballocationType
3185 {
3186  VMA_SUBALLOCATION_TYPE_FREE = 0,
3187  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3188  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3189  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3190  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3191  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3192  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3193 };
3194 
3195 /*
3196 Returns true if given suballocation types could conflict and must respect
3197 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3198 or linear image and the other is an optimal image. If a type is unknown, behave
3199 conservatively.
3200 */
3201 static inline bool VmaIsBufferImageGranularityConflict(
3202  VmaSuballocationType suballocType1,
3203  VmaSuballocationType suballocType2)
3204 {
3205  if(suballocType1 > suballocType2)
3206  {
3207  VMA_SWAP(suballocType1, suballocType2);
3208  }
3209 
3210  switch(suballocType1)
3211  {
3212  case VMA_SUBALLOCATION_TYPE_FREE:
3213  return false;
3214  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3215  return true;
3216  case VMA_SUBALLOCATION_TYPE_BUFFER:
3217  return
3218  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3219  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3220  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3221  return
3222  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3223  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3224  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3225  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3226  return
3227  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3228  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3229  return false;
3230  default:
3231  VMA_ASSERT(0);
3232  return true;
3233  }
3234 }
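// Illustrative examples (not part of the original header):
// VmaIsBufferImageGranularityConflict(VMA_SUBALLOCATION_TYPE_BUFFER,
//     VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) == true, while two
// VMA_SUBALLOCATION_TYPE_BUFFER suballocations never conflict (false).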
3235 
3236 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3237 {
3238  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3239  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3240  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3241  {
3242  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3243  }
3244 }
3245 
3246 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3247 {
3248  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3249  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3250  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3251  {
3252  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3253  {
3254  return false;
3255  }
3256  }
3257  return true;
3258 }
3259 
3260 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3261 struct VmaMutexLock
3262 {
3263  VMA_CLASS_NO_COPY(VmaMutexLock)
3264 public:
3265  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3266  m_pMutex(useMutex ? &mutex : VMA_NULL)
3267  {
3268  if(m_pMutex)
3269  {
3270  m_pMutex->Lock();
3271  }
3272  }
3273 
3274  ~VmaMutexLock()
3275  {
3276  if(m_pMutex)
3277  {
3278  m_pMutex->Unlock();
3279  }
3280  }
3281 
3282 private:
3283  VMA_MUTEX* m_pMutex;
3284 };
3285 
3286 #if VMA_DEBUG_GLOBAL_MUTEX
3287  static VMA_MUTEX gDebugGlobalMutex;
3288  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3289 #else
3290  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3291 #endif
3292 
3293 // Minimum size of a free suballocation to register it in the free suballocation collection.
3294 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3295 
3296 /*
3297 Performs binary search and returns iterator to the first element that is greater
3298 than or equal to (key), according to comparison (cmp).
3299 
3300 Cmp should return true if its first argument is less than its second argument.
3301 
3302 The returned value is the found element, if present in the collection, or the
3303 place where a new element with value (key) should be inserted.
3304 */
3305 template <typename CmpLess, typename IterT, typename KeyT>
3306 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3307 {
3308  size_t down = 0, up = (end - beg);
3309  while(down < up)
3310  {
3311  const size_t mid = (down + up) / 2;
3312  if(cmp(*(beg+mid), key))
3313  {
3314  down = mid + 1;
3315  }
3316  else
3317  {
3318  up = mid;
3319  }
3320  }
3321  return beg + down;
3322 }
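/*
Usage sketch (illustrative, not part of the original header): the function is
equivalent to std::lower_bound over a range sorted according to cmp.

    const uint32_t arr[] = { 1, 3, 3, 7 };
    struct Less { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
    const uint32_t* it = VmaBinaryFindFirstNotLess(arr, arr + 4, 3u, Less());
    // it points at index 1 (the first 3); searching for 4u would point at index 3 (the 7).
*/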
3323 
3325 // Memory allocation
3326 
3327 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3328 {
3329  if((pAllocationCallbacks != VMA_NULL) &&
3330  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3331  {
3332  return (*pAllocationCallbacks->pfnAllocation)(
3333  pAllocationCallbacks->pUserData,
3334  size,
3335  alignment,
3336  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3337  }
3338  else
3339  {
3340  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3341  }
3342 }
3343 
3344 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3345 {
3346  if((pAllocationCallbacks != VMA_NULL) &&
3347  (pAllocationCallbacks->pfnFree != VMA_NULL))
3348  {
3349  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3350  }
3351  else
3352  {
3353  VMA_SYSTEM_FREE(ptr);
3354  }
3355 }
3356 
3357 template<typename T>
3358 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3359 {
3360  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3361 }
3362 
3363 template<typename T>
3364 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3365 {
3366  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3367 }
3368 
3369 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3370 
3371 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3372 
3373 template<typename T>
3374 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3375 {
3376  ptr->~T();
3377  VmaFree(pAllocationCallbacks, ptr);
3378 }
3379 
3380 template<typename T>
3381 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3382 {
3383  if(ptr != VMA_NULL)
3384  {
3385  for(size_t i = count; i--; )
3386  {
3387  ptr[i].~T();
3388  }
3389  VmaFree(pAllocationCallbacks, ptr);
3390  }
3391 }
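/*
Usage sketch (illustrative, not part of the original header): vma_new and
vma_delete pair placement-new over VmaMalloc with an explicit destructor call,
honoring VkAllocationCallbacks when provided (pAllocationCallbacks may be VMA_NULL).

    int* p = vma_new(pAllocationCallbacks, int)(42);
    vma_delete(pAllocationCallbacks, p);
*/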
3392 
3393 // STL-compatible allocator.
3394 template<typename T>
3395 class VmaStlAllocator
3396 {
3397 public:
3398  const VkAllocationCallbacks* const m_pCallbacks;
3399  typedef T value_type;
3400 
3401  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3402  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3403 
3404  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3405  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3406 
3407  template<typename U>
3408  bool operator==(const VmaStlAllocator<U>& rhs) const
3409  {
3410  return m_pCallbacks == rhs.m_pCallbacks;
3411  }
3412  template<typename U>
3413  bool operator!=(const VmaStlAllocator<U>& rhs) const
3414  {
3415  return m_pCallbacks != rhs.m_pCallbacks;
3416  }
3417 
3418  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3419 };
3420 
3421 #if VMA_USE_STL_VECTOR
3422 
3423 #define VmaVector std::vector
3424 
3425 template<typename T, typename allocatorT>
3426 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3427 {
3428  vec.insert(vec.begin() + index, item);
3429 }
3430 
3431 template<typename T, typename allocatorT>
3432 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3433 {
3434  vec.erase(vec.begin() + index);
3435 }
3436 
3437 #else // #if VMA_USE_STL_VECTOR
3438 
3439 /* Class with an interface compatible with a subset of std::vector.
3440 T must be POD because constructors and destructors are not called and memcpy is
3441 used for these objects. */
3442 template<typename T, typename AllocatorT>
3443 class VmaVector
3444 {
3445 public:
3446  typedef T value_type;
3447 
3448  VmaVector(const AllocatorT& allocator) :
3449  m_Allocator(allocator),
3450  m_pArray(VMA_NULL),
3451  m_Count(0),
3452  m_Capacity(0)
3453  {
3454  }
3455 
3456  VmaVector(size_t count, const AllocatorT& allocator) :
3457  m_Allocator(allocator),
3458  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3459  m_Count(count),
3460  m_Capacity(count)
3461  {
3462  }
3463 
3464  VmaVector(const VmaVector<T, AllocatorT>& src) :
3465  m_Allocator(src.m_Allocator),
3466  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3467  m_Count(src.m_Count),
3468  m_Capacity(src.m_Count)
3469  {
3470  if(m_Count != 0)
3471  {
3472  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3473  }
3474  }
3475 
3476  ~VmaVector()
3477  {
3478  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3479  }
3480 
3481  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3482  {
3483  if(&rhs != this)
3484  {
3485  resize(rhs.m_Count);
3486  if(m_Count != 0)
3487  {
3488  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3489  }
3490  }
3491  return *this;
3492  }
3493 
3494  bool empty() const { return m_Count == 0; }
3495  size_t size() const { return m_Count; }
3496  T* data() { return m_pArray; }
3497  const T* data() const { return m_pArray; }
3498 
3499  T& operator[](size_t index)
3500  {
3501  VMA_HEAVY_ASSERT(index < m_Count);
3502  return m_pArray[index];
3503  }
3504  const T& operator[](size_t index) const
3505  {
3506  VMA_HEAVY_ASSERT(index < m_Count);
3507  return m_pArray[index];
3508  }
3509 
3510  T& front()
3511  {
3512  VMA_HEAVY_ASSERT(m_Count > 0);
3513  return m_pArray[0];
3514  }
3515  const T& front() const
3516  {
3517  VMA_HEAVY_ASSERT(m_Count > 0);
3518  return m_pArray[0];
3519  }
3520  T& back()
3521  {
3522  VMA_HEAVY_ASSERT(m_Count > 0);
3523  return m_pArray[m_Count - 1];
3524  }
3525  const T& back() const
3526  {
3527  VMA_HEAVY_ASSERT(m_Count > 0);
3528  return m_pArray[m_Count - 1];
3529  }
3530 
3531  void reserve(size_t newCapacity, bool freeMemory = false)
3532  {
3533  newCapacity = VMA_MAX(newCapacity, m_Count);
3534 
3535  if((newCapacity < m_Capacity) && !freeMemory)
3536  {
3537  newCapacity = m_Capacity;
3538  }
3539 
3540  if(newCapacity != m_Capacity)
3541  {
3542  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3543  if(m_Count != 0)
3544  {
3545  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3546  }
3547  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3548  m_Capacity = newCapacity;
3549  m_pArray = newArray;
3550  }
3551  }
3552 
3553  void resize(size_t newCount, bool freeMemory = false)
3554  {
3555  size_t newCapacity = m_Capacity;
3556  if(newCount > m_Capacity)
3557  {
3558  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3559  }
3560  else if(freeMemory)
3561  {
3562  newCapacity = newCount;
3563  }
3564 
3565  if(newCapacity != m_Capacity)
3566  {
3567  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3568  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3569  if(elementsToCopy != 0)
3570  {
3571  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3572  }
3573  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3574  m_Capacity = newCapacity;
3575  m_pArray = newArray;
3576  }
3577 
3578  m_Count = newCount;
3579  }
3580 
3581  void clear(bool freeMemory = false)
3582  {
3583  resize(0, freeMemory);
3584  }
3585 
3586  void insert(size_t index, const T& src)
3587  {
3588  VMA_HEAVY_ASSERT(index <= m_Count);
3589  const size_t oldCount = size();
3590  resize(oldCount + 1);
3591  if(index < oldCount)
3592  {
3593  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3594  }
3595  m_pArray[index] = src;
3596  }
3597 
3598  void remove(size_t index)
3599  {
3600  VMA_HEAVY_ASSERT(index < m_Count);
3601  const size_t oldCount = size();
3602  if(index < oldCount - 1)
3603  {
3604  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3605  }
3606  resize(oldCount - 1);
3607  }
3608 
3609  void push_back(const T& src)
3610  {
3611  const size_t newIndex = size();
3612  resize(newIndex + 1);
3613  m_pArray[newIndex] = src;
3614  }
3615 
3616  void pop_back()
3617  {
3618  VMA_HEAVY_ASSERT(m_Count > 0);
3619  resize(size() - 1);
3620  }
3621 
3622  void push_front(const T& src)
3623  {
3624  insert(0, src);
3625  }
3626 
3627  void pop_front()
3628  {
3629  VMA_HEAVY_ASSERT(m_Count > 0);
3630  remove(0);
3631  }
3632 
3633  typedef T* iterator;
3634 
3635  iterator begin() { return m_pArray; }
3636  iterator end() { return m_pArray + m_Count; }
3637 
3638 private:
3639  AllocatorT m_Allocator;
3640  T* m_pArray;
3641  size_t m_Count;
3642  size_t m_Capacity;
3643 };
3644 
3645 template<typename T, typename allocatorT>
3646 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
3647 {
3648  vec.insert(index, item);
3649 }
3650 
3651 template<typename T, typename allocatorT>
3652 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
3653 {
3654  vec.remove(index);
3655 }
3656 
3657 #endif // #if VMA_USE_STL_VECTOR
3658 
3659 template<typename CmpLess, typename VectorT>
3660 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
3661 {
3662  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3663  vector.data(),
3664  vector.data() + vector.size(),
3665  value,
3666  CmpLess()) - vector.data();
3667  VmaVectorInsert(vector, indexToInsert, value);
3668  return indexToInsert;
3669 }
3670 
3671 template<typename CmpLess, typename VectorT>
3672 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
3673 {
3674  CmpLess comparator;
3675  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
3676  vector.begin(),
3677  vector.end(),
3678  value,
3679  comparator);
3680  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
3681  {
3682  size_t indexToRemove = it - vector.begin();
3683  VmaVectorRemove(vector, indexToRemove);
3684  return true;
3685  }
3686  return false;
3687 }
3688 
3689 template<typename CmpLess, typename IterT, typename KeyT>
3690 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3691 {
3692  CmpLess comparator;
3693  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3694  beg, end, value, comparator);
3695  if(it == end ||
3696  (!comparator(*it, value) && !comparator(value, *it)))
3697  {
3698  return it;
3699  }
3700  return end;
3701 }
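/*
Usage sketch (illustrative, not part of the original header): keeping a
VmaVector sorted with the helpers above.

    struct Less { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > vec(VmaStlAllocator<uint32_t>(VMA_NULL));
    VmaVectorInsertSorted<Less>(vec, 7u);
    VmaVectorInsertSorted<Less>(vec, 3u);                // vec is now { 3, 7 }
    bool removed = VmaVectorRemoveSorted<Less>(vec, 7u); // true; vec is now { 3 }
*/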
3702 
3704 // class VmaPoolAllocator
3705 
3706 /*
3707 Allocator for objects of type T using a list of arrays (pools) to speed up
3708 allocation. The number of elements that can be allocated is not bounded, because the
3709 allocator can create multiple blocks.
3710 */
3711 template<typename T>
3712 class VmaPoolAllocator
3713 {
3714  VMA_CLASS_NO_COPY(VmaPoolAllocator)
3715 public:
3716  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
3717  ~VmaPoolAllocator();
3718  void Clear();
3719  T* Alloc();
3720  void Free(T* ptr);
3721 
3722 private:
3723  union Item
3724  {
3725  uint32_t NextFreeIndex;
3726  T Value;
3727  };
3728 
3729  struct ItemBlock
3730  {
3731  Item* pItems;
3732  uint32_t FirstFreeIndex;
3733  };
3734 
3735  const VkAllocationCallbacks* m_pAllocationCallbacks;
3736  size_t m_ItemsPerBlock;
3737  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
3738 
3739  ItemBlock& CreateNewBlock();
3740 };
3741 
3742 template<typename T>
3743 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
3744  m_pAllocationCallbacks(pAllocationCallbacks),
3745  m_ItemsPerBlock(itemsPerBlock),
3746  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
3747 {
3748  VMA_ASSERT(itemsPerBlock > 0);
3749 }
3750 
3751 template<typename T>
3752 VmaPoolAllocator<T>::~VmaPoolAllocator()
3753 {
3754  Clear();
3755 }
3756 
3757 template<typename T>
3758 void VmaPoolAllocator<T>::Clear()
3759 {
3760  for(size_t i = m_ItemBlocks.size(); i--; )
3761  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3762  m_ItemBlocks.clear();
3763 }
3764 
3765 template<typename T>
3766 T* VmaPoolAllocator<T>::Alloc()
3767 {
3768  for(size_t i = m_ItemBlocks.size(); i--; )
3769  {
3770  ItemBlock& block = m_ItemBlocks[i];
3771  // This block has some free items: use the first one.
3772  if(block.FirstFreeIndex != UINT32_MAX)
3773  {
3774  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3775  block.FirstFreeIndex = pItem->NextFreeIndex;
3776  return &pItem->Value;
3777  }
3778  }
3779 
3780  // No block has a free item: create a new one and use it.
3781  ItemBlock& newBlock = CreateNewBlock();
3782  Item* const pItem = &newBlock.pItems[0];
3783  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3784  return &pItem->Value;
3785 }
3786 
3787 template<typename T>
3788 void VmaPoolAllocator<T>::Free(T* ptr)
3789 {
3790  // Search all memory blocks to find ptr.
3791  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3792  {
3793  ItemBlock& block = m_ItemBlocks[i];
3794 
3795  // Casting to union.
3796  Item* pItemPtr;
3797  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3798 
3799  // Check if pItemPtr is in address range of this block.
3800  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3801  {
3802  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3803  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3804  block.FirstFreeIndex = index;
3805  return;
3806  }
3807  }
3808  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3809 }
3810 
3811 template<typename T>
3812 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3813 {
3814  ItemBlock newBlock = {
3815  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3816 
3817  m_ItemBlocks.push_back(newBlock);
3818 
3819  // Set up singly-linked list of all free items in this block.
3820  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3821  newBlock.pItems[i].NextFreeIndex = i + 1;
3822  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3823  return m_ItemBlocks.back();
3824 }
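/*
Usage sketch (illustrative, not part of the original header). Note that Alloc()
returns raw, unconstructed storage, so T should be trivially constructible here.

    VmaPoolAllocator<uint64_t> blockAlloc(VMA_NULL, 32); // 32 items per block
    uint64_t* pItem = blockAlloc.Alloc(); // O(1) unless a new block must be created
    *pItem = 123;
    blockAlloc.Free(pItem); // returns the slot to its block's free list
*/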
3825 
3827 // class VmaRawList, VmaList
3828 
3829 #if VMA_USE_STL_LIST
3830 
3831 #define VmaList std::list
3832 
3833 #else // #if VMA_USE_STL_LIST
3834 
3835 template<typename T>
3836 struct VmaListItem
3837 {
3838  VmaListItem* pPrev;
3839  VmaListItem* pNext;
3840  T Value;
3841 };
3842 
3843 // Doubly linked list.
3844 template<typename T>
3845 class VmaRawList
3846 {
3847  VMA_CLASS_NO_COPY(VmaRawList)
3848 public:
3849  typedef VmaListItem<T> ItemType;
3850 
3851  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
3852  ~VmaRawList();
3853  void Clear();
3854 
3855  size_t GetCount() const { return m_Count; }
3856  bool IsEmpty() const { return m_Count == 0; }
3857 
3858  ItemType* Front() { return m_pFront; }
3859  const ItemType* Front() const { return m_pFront; }
3860  ItemType* Back() { return m_pBack; }
3861  const ItemType* Back() const { return m_pBack; }
3862 
3863  ItemType* PushBack();
3864  ItemType* PushFront();
3865  ItemType* PushBack(const T& value);
3866  ItemType* PushFront(const T& value);
3867  void PopBack();
3868  void PopFront();
3869 
3870  // Item can be null - it means PushBack.
3871  ItemType* InsertBefore(ItemType* pItem);
3872  // Item can be null - it means PushFront.
3873  ItemType* InsertAfter(ItemType* pItem);
3874 
3875  ItemType* InsertBefore(ItemType* pItem, const T& value);
3876  ItemType* InsertAfter(ItemType* pItem, const T& value);
3877 
3878  void Remove(ItemType* pItem);
3879 
3880 private:
3881  const VkAllocationCallbacks* const m_pAllocationCallbacks;
3882  VmaPoolAllocator<ItemType> m_ItemAllocator;
3883  ItemType* m_pFront;
3884  ItemType* m_pBack;
3885  size_t m_Count;
3886 };
3887 
3888 template<typename T>
3889 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
3890  m_pAllocationCallbacks(pAllocationCallbacks),
3891  m_ItemAllocator(pAllocationCallbacks, 128),
3892  m_pFront(VMA_NULL),
3893  m_pBack(VMA_NULL),
3894  m_Count(0)
3895 {
3896 }
3897 
3898 template<typename T>
3899 VmaRawList<T>::~VmaRawList()
3900 {
3901  // Intentionally not calling Clear, because that would spend unnecessary
3902  // computation returning all items to m_ItemAllocator as free.
3903 }
3904 
3905 template<typename T>
3906 void VmaRawList<T>::Clear()
3907 {
3908  if(IsEmpty() == false)
3909  {
3910  ItemType* pItem = m_pBack;
3911  while(pItem != VMA_NULL)
3912  {
3913  ItemType* const pPrevItem = pItem->pPrev;
3914  m_ItemAllocator.Free(pItem);
3915  pItem = pPrevItem;
3916  }
3917  m_pFront = VMA_NULL;
3918  m_pBack = VMA_NULL;
3919  m_Count = 0;
3920  }
3921 }
3922 
3923 template<typename T>
3924 VmaListItem<T>* VmaRawList<T>::PushBack()
3925 {
3926  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3927  pNewItem->pNext = VMA_NULL;
3928  if(IsEmpty())
3929  {
3930  pNewItem->pPrev = VMA_NULL;
3931  m_pFront = pNewItem;
3932  m_pBack = pNewItem;
3933  m_Count = 1;
3934  }
3935  else
3936  {
3937  pNewItem->pPrev = m_pBack;
3938  m_pBack->pNext = pNewItem;
3939  m_pBack = pNewItem;
3940  ++m_Count;
3941  }
3942  return pNewItem;
3943 }
3944 
3945 template<typename T>
3946 VmaListItem<T>* VmaRawList<T>::PushFront()
3947 {
3948  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3949  pNewItem->pPrev = VMA_NULL;
3950  if(IsEmpty())
3951  {
3952  pNewItem->pNext = VMA_NULL;
3953  m_pFront = pNewItem;
3954  m_pBack = pNewItem;
3955  m_Count = 1;
3956  }
3957  else
3958  {
3959  pNewItem->pNext = m_pFront;
3960  m_pFront->pPrev = pNewItem;
3961  m_pFront = pNewItem;
3962  ++m_Count;
3963  }
3964  return pNewItem;
3965 }
3966 
3967 template<typename T>
3968 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3969 {
3970  ItemType* const pNewItem = PushBack();
3971  pNewItem->Value = value;
3972  return pNewItem;
3973 }
3974 
3975 template<typename T>
3976 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3977 {
3978  ItemType* const pNewItem = PushFront();
3979  pNewItem->Value = value;
3980  return pNewItem;
3981 }
3982 
3983 template<typename T>
3984 void VmaRawList<T>::PopBack()
3985 {
3986  VMA_HEAVY_ASSERT(m_Count > 0);
3987  ItemType* const pBackItem = m_pBack;
3988  ItemType* const pPrevItem = pBackItem->pPrev;
3989  if(pPrevItem != VMA_NULL)
3990  {
3991  pPrevItem->pNext = VMA_NULL;
3992  }
3993  m_pBack = pPrevItem;
3994  m_ItemAllocator.Free(pBackItem);
3995  --m_Count;
3996 }
3997 
3998 template<typename T>
3999 void VmaRawList<T>::PopFront()
4000 {
4001  VMA_HEAVY_ASSERT(m_Count > 0);
4002  ItemType* const pFrontItem = m_pFront;
4003  ItemType* const pNextItem = pFrontItem->pNext;
4004  if(pNextItem != VMA_NULL)
4005  {
4006  pNextItem->pPrev = VMA_NULL;
4007  }
4008  m_pFront = pNextItem;
4009  m_ItemAllocator.Free(pFrontItem);
4010  --m_Count;
4011 }
4012 
4013 template<typename T>
4014 void VmaRawList<T>::Remove(ItemType* pItem)
4015 {
4016  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4017  VMA_HEAVY_ASSERT(m_Count > 0);
4018 
4019  if(pItem->pPrev != VMA_NULL)
4020  {
4021  pItem->pPrev->pNext = pItem->pNext;
4022  }
4023  else
4024  {
4025  VMA_HEAVY_ASSERT(m_pFront == pItem);
4026  m_pFront = pItem->pNext;
4027  }
4028 
4029  if(pItem->pNext != VMA_NULL)
4030  {
4031  pItem->pNext->pPrev = pItem->pPrev;
4032  }
4033  else
4034  {
4035  VMA_HEAVY_ASSERT(m_pBack == pItem);
4036  m_pBack = pItem->pPrev;
4037  }
4038 
4039  m_ItemAllocator.Free(pItem);
4040  --m_Count;
4041 }
4042 
4043 template<typename T>
4044 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4045 {
4046  if(pItem != VMA_NULL)
4047  {
4048  ItemType* const prevItem = pItem->pPrev;
4049  ItemType* const newItem = m_ItemAllocator.Alloc();
4050  newItem->pPrev = prevItem;
4051  newItem->pNext = pItem;
4052  pItem->pPrev = newItem;
4053  if(prevItem != VMA_NULL)
4054  {
4055  prevItem->pNext = newItem;
4056  }
4057  else
4058  {
4059  VMA_HEAVY_ASSERT(m_pFront == pItem);
4060  m_pFront = newItem;
4061  }
4062  ++m_Count;
4063  return newItem;
4064  }
4065  else
4066  return PushBack();
4067 }
4068 
4069 template<typename T>
4070 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4071 {
4072  if(pItem != VMA_NULL)
4073  {
4074  ItemType* const nextItem = pItem->pNext;
4075  ItemType* const newItem = m_ItemAllocator.Alloc();
4076  newItem->pNext = nextItem;
4077  newItem->pPrev = pItem;
4078  pItem->pNext = newItem;
4079  if(nextItem != VMA_NULL)
4080  {
4081  nextItem->pPrev = newItem;
4082  }
4083  else
4084  {
4085  VMA_HEAVY_ASSERT(m_pBack == pItem);
4086  m_pBack = newItem;
4087  }
4088  ++m_Count;
4089  return newItem;
4090  }
4091  else
4092  return PushFront();
4093 }
4094 
4095 template<typename T>
4096 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4097 {
4098  ItemType* const newItem = InsertBefore(pItem);
4099  newItem->Value = value;
4100  return newItem;
4101 }
4102 
4103 template<typename T>
4104 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4105 {
4106  ItemType* const newItem = InsertAfter(pItem);
4107  newItem->Value = value;
4108  return newItem;
4109 }
4110 
4111 template<typename T, typename AllocatorT>
4112 class VmaList
4113 {
4114  VMA_CLASS_NO_COPY(VmaList)
4115 public:
4116  class iterator
4117  {
4118  public:
4119  iterator() :
4120  m_pList(VMA_NULL),
4121  m_pItem(VMA_NULL)
4122  {
4123  }
4124 
4125  T& operator*() const
4126  {
4127  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4128  return m_pItem->Value;
4129  }
4130  T* operator->() const
4131  {
4132  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4133  return &m_pItem->Value;
4134  }
4135 
4136  iterator& operator++()
4137  {
4138  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4139  m_pItem = m_pItem->pNext;
4140  return *this;
4141  }
4142  iterator& operator--()
4143  {
4144  if(m_pItem != VMA_NULL)
4145  {
4146  m_pItem = m_pItem->pPrev;
4147  }
4148  else
4149  {
4150  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4151  m_pItem = m_pList->Back();
4152  }
4153  return *this;
4154  }
4155 
4156  iterator operator++(int)
4157  {
4158  iterator result = *this;
4159  ++*this;
4160  return result;
4161  }
4162  iterator operator--(int)
4163  {
4164  iterator result = *this;
4165  --*this;
4166  return result;
4167  }
4168 
4169  bool operator==(const iterator& rhs) const
4170  {
4171  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4172  return m_pItem == rhs.m_pItem;
4173  }
4174  bool operator!=(const iterator& rhs) const
4175  {
4176  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4177  return m_pItem != rhs.m_pItem;
4178  }
4179 
4180  private:
4181  VmaRawList<T>* m_pList;
4182  VmaListItem<T>* m_pItem;
4183 
4184  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4185  m_pList(pList),
4186  m_pItem(pItem)
4187  {
4188  }
4189 
4190  friend class VmaList<T, AllocatorT>;
4191  };
4192 
4193  class const_iterator
4194  {
4195  public:
4196  const_iterator() :
4197  m_pList(VMA_NULL),
4198  m_pItem(VMA_NULL)
4199  {
4200  }
4201 
4202  const_iterator(const iterator& src) :
4203  m_pList(src.m_pList),
4204  m_pItem(src.m_pItem)
4205  {
4206  }
4207 
4208  const T& operator*() const
4209  {
4210  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4211  return m_pItem->Value;
4212  }
4213  const T* operator->() const
4214  {
4215  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4216  return &m_pItem->Value;
4217  }
4218 
4219  const_iterator& operator++()
4220  {
4221  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4222  m_pItem = m_pItem->pNext;
4223  return *this;
4224  }
4225  const_iterator& operator--()
4226  {
4227  if(m_pItem != VMA_NULL)
4228  {
4229  m_pItem = m_pItem->pPrev;
4230  }
4231  else
4232  {
4233  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4234  m_pItem = m_pList->Back();
4235  }
4236  return *this;
4237  }
4238 
4239  const_iterator operator++(int)
4240  {
4241  const_iterator result = *this;
4242  ++*this;
4243  return result;
4244  }
4245  const_iterator operator--(int)
4246  {
4247  const_iterator result = *this;
4248  --*this;
4249  return result;
4250  }
4251 
4252  bool operator==(const const_iterator& rhs) const
4253  {
4254  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4255  return m_pItem == rhs.m_pItem;
4256  }
4257  bool operator!=(const const_iterator& rhs) const
4258  {
4259  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4260  return m_pItem != rhs.m_pItem;
4261  }
4262 
4263  private:
4264  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4265  m_pList(pList),
4266  m_pItem(pItem)
4267  {
4268  }
4269 
4270  const VmaRawList<T>* m_pList;
4271  const VmaListItem<T>* m_pItem;
4272 
4273  friend class VmaList<T, AllocatorT>;
4274  };
4275 
4276  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4277 
4278  bool empty() const { return m_RawList.IsEmpty(); }
4279  size_t size() const { return m_RawList.GetCount(); }
4280 
4281  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4282  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4283 
4284  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4285  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4286 
4287  void clear() { m_RawList.Clear(); }
4288  void push_back(const T& value) { m_RawList.PushBack(value); }
4289  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4290  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4291 
4292 private:
4293  VmaRawList<T> m_RawList;
4294 };
4295 
4296 #endif // #if VMA_USE_STL_LIST
4297 
4298 ////////////////////////////////////////////////////////////////////////////////
4299 // class VmaMap
4300 
4301 // Unused in this version.
4302 #if 0
4303 
4304 #if VMA_USE_STL_UNORDERED_MAP
4305 
4306 #define VmaPair std::pair
4307 
4308 #define VMA_MAP_TYPE(KeyT, ValueT) \
4309  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4310 
4311 #else // #if VMA_USE_STL_UNORDERED_MAP
4312 
4313 template<typename T1, typename T2>
4314 struct VmaPair
4315 {
4316  T1 first;
4317  T2 second;
4318 
4319  VmaPair() : first(), second() { }
4320  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4321 };
4322 
4323 /* Class compatible with subset of interface of std::unordered_map.
4324 KeyT, ValueT must be POD because they will be stored in VmaVector.
4325 */
4326 template<typename KeyT, typename ValueT>
4327 class VmaMap
4328 {
4329 public:
4330  typedef VmaPair<KeyT, ValueT> PairType;
4331  typedef PairType* iterator;
4332 
4333  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4334 
4335  iterator begin() { return m_Vector.begin(); }
4336  iterator end() { return m_Vector.end(); }
4337 
4338  void insert(const PairType& pair);
4339  iterator find(const KeyT& key);
4340  void erase(iterator it);
4341 
4342 private:
4343  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4344 };
4345 
4346 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4347 
4348 template<typename FirstT, typename SecondT>
4349 struct VmaPairFirstLess
4350 {
4351  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4352  {
4353  return lhs.first < rhs.first;
4354  }
4355  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4356  {
4357  return lhs.first < rhsFirst;
4358  }
4359 };
4360 
4361 template<typename KeyT, typename ValueT>
4362 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4363 {
4364  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4365  m_Vector.data(),
4366  m_Vector.data() + m_Vector.size(),
4367  pair,
4368  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4369  VmaVectorInsert(m_Vector, indexToInsert, pair);
4370 }
4371 
4372 template<typename KeyT, typename ValueT>
4373 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4374 {
4375  PairType* it = VmaBinaryFindFirstNotLess(
4376  m_Vector.data(),
4377  m_Vector.data() + m_Vector.size(),
4378  key,
4379  VmaPairFirstLess<KeyT, ValueT>());
4380  if((it != m_Vector.end()) && (it->first == key))
4381  {
4382  return it;
4383  }
4384  else
4385  {
4386  return m_Vector.end();
4387  }
4388 }
4389 
4390 template<typename KeyT, typename ValueT>
4391 void VmaMap<KeyT, ValueT>::erase(iterator it)
4392 {
4393  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4394 }
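Although this `VmaMap` is compiled out under `#if 0` here, the sorted-vector map strategy it documents is worth a concrete illustration. A minimal standalone sketch, with the standard library standing in for `VmaVector` and `VmaBinaryFindFirstNotLess` (all names below are hypothetical, not library API):

```cpp
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// Keep pairs ordered by key; insert with lower_bound, look up by binary
// search. This trades O(n) insertion for compact storage and O(log n),
// cache-friendly lookup - a good fit for small maps of POD pairs.
int main()
{
    typedef std::pair<uint32_t, uint32_t> Pair;
    std::vector<Pair> map;
    const auto lessByKey = [](const Pair& p, uint32_t key) { return p.first < key; };

    // insert(): find the first element not less than the key, insert before it.
    const uint32_t key = 5, value = 50;
    map.insert(std::lower_bound(map.begin(), map.end(), key, lessByKey),
        Pair(key, value));

    // find(): binary search, then confirm the key actually matches.
    auto it = std::lower_bound(map.begin(), map.end(), key, lessByKey);
    const bool found = (it != map.end()) && (it->first == key); // true
    (void)found;
}
```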
4395 
4396 #endif // #if VMA_USE_STL_UNORDERED_MAP
4397 
4398 #endif // #if 0
4399 
4400 ////////////////////////////////////////////////////////////////////////////////
4401 
4402 class VmaDeviceMemoryBlock;
4403 
4404 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4405 
4406 struct VmaAllocation_T
4407 {
4408  VMA_CLASS_NO_COPY(VmaAllocation_T)
4409 private:
4410  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4411 
4412  enum FLAGS
4413  {
4414  FLAG_USER_DATA_STRING = 0x01,
4415  };
4416 
4417 public:
4418  enum ALLOCATION_TYPE
4419  {
4420  ALLOCATION_TYPE_NONE,
4421  ALLOCATION_TYPE_BLOCK,
4422  ALLOCATION_TYPE_DEDICATED,
4423  };
4424 
4425  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4426  m_Alignment(1),
4427  m_Size(0),
4428  m_pUserData(VMA_NULL),
4429  m_LastUseFrameIndex(currentFrameIndex),
4430  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4431  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4432  m_MapCount(0),
4433  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4434  {
4435 #if VMA_STATS_STRING_ENABLED
4436  m_CreationFrameIndex = currentFrameIndex;
4437  m_BufferImageUsage = 0;
4438 #endif
4439  }
4440 
4441  ~VmaAllocation_T()
4442  {
4443  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4444 
4445  // Check if owned string was freed.
4446  VMA_ASSERT(m_pUserData == VMA_NULL);
4447  }
4448 
4449  void InitBlockAllocation(
4450  VmaPool hPool,
4451  VmaDeviceMemoryBlock* block,
4452  VkDeviceSize offset,
4453  VkDeviceSize alignment,
4454  VkDeviceSize size,
4455  VmaSuballocationType suballocationType,
4456  bool mapped,
4457  bool canBecomeLost)
4458  {
4459  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4460  VMA_ASSERT(block != VMA_NULL);
4461  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4462  m_Alignment = alignment;
4463  m_Size = size;
4464  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4465  m_SuballocationType = (uint8_t)suballocationType;
4466  m_BlockAllocation.m_hPool = hPool;
4467  m_BlockAllocation.m_Block = block;
4468  m_BlockAllocation.m_Offset = offset;
4469  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4470  }
4471 
4472  void InitLost()
4473  {
4474  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4475  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4476  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4477  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4478  m_BlockAllocation.m_Block = VMA_NULL;
4479  m_BlockAllocation.m_Offset = 0;
4480  m_BlockAllocation.m_CanBecomeLost = true;
4481  }
4482 
4483  void ChangeBlockAllocation(
4484  VmaAllocator hAllocator,
4485  VmaDeviceMemoryBlock* block,
4486  VkDeviceSize offset);
4487 
4488  // pMappedData not null means the allocation was created with the MAPPED flag.
4489  void InitDedicatedAllocation(
4490  uint32_t memoryTypeIndex,
4491  VkDeviceMemory hMemory,
4492  VmaSuballocationType suballocationType,
4493  void* pMappedData,
4494  VkDeviceSize size)
4495  {
4496  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4497  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4498  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4499  m_Alignment = 0;
4500  m_Size = size;
4501  m_SuballocationType = (uint8_t)suballocationType;
4502  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4503  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4504  m_DedicatedAllocation.m_hMemory = hMemory;
4505  m_DedicatedAllocation.m_pMappedData = pMappedData;
4506  }
4507 
4508  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4509  VkDeviceSize GetAlignment() const { return m_Alignment; }
4510  VkDeviceSize GetSize() const { return m_Size; }
4511  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4512  void* GetUserData() const { return m_pUserData; }
4513  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4514  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4515 
4516  VmaDeviceMemoryBlock* GetBlock() const
4517  {
4518  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4519  return m_BlockAllocation.m_Block;
4520  }
4521  VkDeviceSize GetOffset() const;
4522  VkDeviceMemory GetMemory() const;
4523  uint32_t GetMemoryTypeIndex() const;
4524  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4525  void* GetMappedData() const;
4526  bool CanBecomeLost() const;
4527  VmaPool GetPool() const;
4528 
4529  uint32_t GetLastUseFrameIndex() const
4530  {
4531  return m_LastUseFrameIndex.load();
4532  }
4533  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4534  {
4535  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4536  }
4537  /*
4538  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4539  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4540  - Else, returns false.
4541 
4542  If hAllocation is already lost, assert - you should not call it then.
4543  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
4544  */
4545  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4546 
4547  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4548  {
4549  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4550  outInfo.blockCount = 1;
4551  outInfo.allocationCount = 1;
4552  outInfo.unusedRangeCount = 0;
4553  outInfo.usedBytes = m_Size;
4554  outInfo.unusedBytes = 0;
4555  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4556  outInfo.unusedRangeSizeMin = UINT64_MAX;
4557  outInfo.unusedRangeSizeMax = 0;
4558  }
4559 
4560  void BlockAllocMap();
4561  void BlockAllocUnmap();
4562  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4563  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4564 
4565 #if VMA_STATS_STRING_ENABLED
4566  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4567  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4568 
4569  void InitBufferImageUsage(uint32_t bufferImageUsage)
4570  {
4571  VMA_ASSERT(m_BufferImageUsage == 0);
4572  m_BufferImageUsage = bufferImageUsage;
4573  }
4574 
4575  void PrintParameters(class VmaJsonWriter& json) const;
4576 #endif
4577 
4578 private:
4579  VkDeviceSize m_Alignment;
4580  VkDeviceSize m_Size;
4581  void* m_pUserData;
4582  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4583  uint8_t m_Type; // ALLOCATION_TYPE
4584  uint8_t m_SuballocationType; // VmaSuballocationType
4585  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4586  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
4587  uint8_t m_MapCount;
4588  uint8_t m_Flags; // enum FLAGS
4589 
4590  // Allocation out of VmaDeviceMemoryBlock.
4591  struct BlockAllocation
4592  {
4593  VmaPool m_hPool; // Null if belongs to general memory.
4594  VmaDeviceMemoryBlock* m_Block;
4595  VkDeviceSize m_Offset;
4596  bool m_CanBecomeLost;
4597  };
4598 
4599  // Allocation for an object that has its own private VkDeviceMemory.
4600  struct DedicatedAllocation
4601  {
4602  uint32_t m_MemoryTypeIndex;
4603  VkDeviceMemory m_hMemory;
4604  void* m_pMappedData; // Not null means memory is mapped.
4605  };
4606 
4607  union
4608  {
4609  // Allocation out of VmaDeviceMemoryBlock.
4610  BlockAllocation m_BlockAllocation;
4611  // Allocation for an object that has its own private VkDeviceMemory.
4612  DedicatedAllocation m_DedicatedAllocation;
4613  };
4614 
4615 #if VMA_STATS_STRING_ENABLED
4616  uint32_t m_CreationFrameIndex;
4617  uint32_t m_BufferImageUsage; // 0 if unknown.
4618 #endif
4619 
4620  void FreeUserDataString(VmaAllocator hAllocator);
4621 };
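Two pieces of `VmaAllocation_T` state deserve a closer look: `m_MapCount` packs the persistent-map flag together with a 7-bit mapping reference counter, and `MakeLost()` implements the frame-based eviction test documented in its comment above. A minimal standalone sketch of both rules (hypothetical helper names, not library API):

```cpp
#include <cassert>
#include <cstdint>

static const uint8_t MAP_FLAG_PERSISTENT = 0x80; // mirrors MAP_COUNT_FLAG_PERSISTENT_MAP

// Low 7 bits count outstanding vmaMapMemory() calls; bit 0x80 marks an
// allocation created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
inline uint8_t MapRefCount(uint8_t mapCount) { return mapCount & ~MAP_FLAG_PERSISTENT; }
inline bool IsPersistentlyMapped(uint8_t mapCount) { return (mapCount & MAP_FLAG_PERSISTENT) != 0; }

// The eviction rule from MakeLost(): an allocation may become lost only when
// it was last used more than frameInUseCount frames ago.
inline bool ShouldBecomeLost(uint32_t lastUseFrameIndex, uint32_t frameInUseCount, uint32_t currentFrameIndex)
{
    return lastUseFrameIndex + frameInUseCount < currentFrameIndex;
}

int main()
{
    uint8_t mapCount = MAP_FLAG_PERSISTENT; // created persistently mapped
    ++mapCount;                             // explicit vmaMapMemory()
    assert(IsPersistentlyMapped(mapCount) && MapRefCount(mapCount) == 1);
    --mapCount;                             // matching vmaUnmapMemory()
    assert(MapRefCount(mapCount) == 0);     // the destructor's assertion would pass

    // Last used in frame 10 with frameInUseCount = 2: still alive in frame 12,
    // eligible to be made lost from frame 13 on.
    assert(!ShouldBecomeLost(10, 2, 12));
    assert(ShouldBecomeLost(10, 2, 13));
}
```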
4622 
4623 /*
4624 Represents a region of VmaDeviceMemoryBlock that is either assigned to an
4625 allocation and returned as allocated memory, or free.
4626 */
4627 struct VmaSuballocation
4628 {
4629  VkDeviceSize offset;
4630  VkDeviceSize size;
4631  VmaAllocation hAllocation;
4632  VmaSuballocationType type;
4633 };
4634 
4635 // Comparator for offsets.
4636 struct VmaSuballocationOffsetLess
4637 {
4638  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4639  {
4640  return lhs.offset < rhs.offset;
4641  }
4642 };
4643 struct VmaSuballocationOffsetGreater
4644 {
4645  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4646  {
4647  return lhs.offset > rhs.offset;
4648  }
4649 };
4650 
4651 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4652 
4653 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
4654 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4655 
4656 /*
4657 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4658 
4659 If canMakeOtherLost was false:
4660 - item points to a FREE suballocation.
4661 - itemsToMakeLostCount is 0.
4662 
4663 If canMakeOtherLost was true:
4664 - item points to first of sequence of suballocations, which are either FREE,
4665  or point to VmaAllocations that can become lost.
4666 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4667  the requested allocation to succeed.
4668 */
4669 struct VmaAllocationRequest
4670 {
4671  VkDeviceSize offset;
4672  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4673  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4674  VmaSuballocationList::iterator item;
4675  size_t itemsToMakeLostCount;
4676  void* customData;
4677 
4678  VkDeviceSize CalcCost() const
4679  {
4680  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4681  }
4682 };
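`CalcCost()` gives the allocator a single number for comparing candidate placements: every allocation that would have to be made lost is charged a flat `VMA_LOST_ALLOCATION_COST` (1 MiB) on top of the bytes it actually occupies. A standalone illustration (hypothetical free function mirroring the method above):

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

static const uint64_t LOST_ALLOCATION_COST = 1048576; // 1 MiB charged per evicted allocation

uint64_t CalcCost(uint64_t sumItemSize, size_t itemsToMakeLostCount)
{
    return sumItemSize + itemsToMakeLostCount * LOST_ALLOCATION_COST;
}

int main()
{
    // Candidate A: overlaps 256 KiB spread across 2 allocations to make lost.
    // Candidate B: overlaps 512 KiB but only 1 allocation to make lost.
    std::cout << CalcCost(262144, 2) << "\n"; // 2359296
    std::cout << CalcCost(524288, 1) << "\n"; // 1572864 -> B is the cheaper placement
}
```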
4683 
4684 /*
4685 Data structure used for bookkeeping of allocations and unused ranges of memory
4686 in a single VkDeviceMemory block.
4687 */
4688 class VmaBlockMetadata
4689 {
4690 public:
4691  VmaBlockMetadata(VmaAllocator hAllocator);
4692  virtual ~VmaBlockMetadata() { }
4693  virtual void Init(VkDeviceSize size) { m_Size = size; }
4694 
4695  // Validates all data structures inside this object. If not valid, returns false.
4696  virtual bool Validate() const = 0;
4697  VkDeviceSize GetSize() const { return m_Size; }
4698  virtual size_t GetAllocationCount() const = 0;
4699  virtual VkDeviceSize GetSumFreeSize() const = 0;
4700  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
4701  // Returns true if this block is empty - contains only a single free suballocation.
4702  virtual bool IsEmpty() const = 0;
4703 
4704  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
4705  // Shouldn't modify blockCount.
4706  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
4707 
4708 #if VMA_STATS_STRING_ENABLED
4709  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
4710 #endif
4711 
4712  // Tries to find a place for suballocation with given parameters inside this block.
4713  // On success, fills pAllocationRequest and returns true.
4714  // On failure, returns false.
4715  virtual bool CreateAllocationRequest(
4716  uint32_t currentFrameIndex,
4717  uint32_t frameInUseCount,
4718  VkDeviceSize bufferImageGranularity,
4719  VkDeviceSize allocSize,
4720  VkDeviceSize allocAlignment,
4721  bool upperAddress,
4722  VmaSuballocationType allocType,
4723  bool canMakeOtherLost,
4724  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
4725  VmaAllocationRequest* pAllocationRequest) = 0;
4726 
4727  virtual bool MakeRequestedAllocationsLost(
4728  uint32_t currentFrameIndex,
4729  uint32_t frameInUseCount,
4730  VmaAllocationRequest* pAllocationRequest) = 0;
4731 
4732  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
4733 
4734  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
4735 
4736  // Makes actual allocation based on request. Request must already be checked and valid.
4737  virtual void Alloc(
4738  const VmaAllocationRequest& request,
4739  VmaSuballocationType type,
4740  VkDeviceSize allocSize,
4741  bool upperAddress,
4742  VmaAllocation hAllocation) = 0;
4743 
4744  // Frees suballocation assigned to given memory region.
4745  virtual void Free(const VmaAllocation allocation) = 0;
4746  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
4747 
4748 protected:
4749  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
4750 
4751 #if VMA_STATS_STRING_ENABLED
4752  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
4753  VkDeviceSize unusedBytes,
4754  size_t allocationCount,
4755  size_t unusedRangeCount) const;
4756  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
4757  VkDeviceSize offset,
4758  VmaAllocation hAllocation) const;
4759  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
4760  VkDeviceSize offset,
4761  VkDeviceSize size) const;
4762  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
4763 #endif
4764 
4765 private:
4766  VkDeviceSize m_Size;
4767  const VkAllocationCallbacks* m_pAllocationCallbacks;
4768 };
4769 
4770 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
4771  VMA_ASSERT(0 && "Validation failed: " #cond); \
4772  return false; \
4773  } } while(false)
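The `do { ... } while(false)` wrapper makes `VMA_VALIDATE` expand to a single statement, so it composes safely with unbraced `if`/`else`, and each failing condition asserts with its own stringified text before returning `false` from the enclosing function. A hypothetical usage sketch using the macro above; the counts are stand-ins for real metadata fields:

```cpp
static bool ExampleValidate(size_t freeCount, size_t allocCount, size_t totalCount)
{
    VMA_VALIDATE(freeCount + allocCount == totalCount);
    if(totalCount == 0)
        VMA_VALIDATE(freeCount == 0); // expands to one statement: no braces needed
    return true;
}
```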
4774 
4775 class VmaBlockMetadata_Generic : public VmaBlockMetadata
4776 {
4777  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
4778 public:
4779  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
4780  virtual ~VmaBlockMetadata_Generic();
4781  virtual void Init(VkDeviceSize size);
4782 
4783  virtual bool Validate() const;
4784  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4785  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4786  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4787  virtual bool IsEmpty() const;
4788 
4789  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4790  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4791 
4792 #if VMA_STATS_STRING_ENABLED
4793  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4794 #endif
4795 
4796  virtual bool CreateAllocationRequest(
4797  uint32_t currentFrameIndex,
4798  uint32_t frameInUseCount,
4799  VkDeviceSize bufferImageGranularity,
4800  VkDeviceSize allocSize,
4801  VkDeviceSize allocAlignment,
4802  bool upperAddress,
4803  VmaSuballocationType allocType,
4804  bool canMakeOtherLost,
4805  uint32_t strategy,
4806  VmaAllocationRequest* pAllocationRequest);
4807 
4808  virtual bool MakeRequestedAllocationsLost(
4809  uint32_t currentFrameIndex,
4810  uint32_t frameInUseCount,
4811  VmaAllocationRequest* pAllocationRequest);
4812 
4813  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4814 
4815  virtual VkResult CheckCorruption(const void* pBlockData);
4816 
4817  virtual void Alloc(
4818  const VmaAllocationRequest& request,
4819  VmaSuballocationType type,
4820  VkDeviceSize allocSize,
4821  bool upperAddress,
4822  VmaAllocation hAllocation);
4823 
4824  virtual void Free(const VmaAllocation allocation);
4825  virtual void FreeAtOffset(VkDeviceSize offset);
4826 
4827 private:
4828  uint32_t m_FreeCount;
4829  VkDeviceSize m_SumFreeSize;
4830  VmaSuballocationList m_Suballocations;
4831  // Suballocations that are free and have size greater than a certain threshold.
4832  // Sorted by size, ascending.
4833  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4834 
4835  bool ValidateFreeSuballocationList() const;
4836 
4837  // Checks if a requested suballocation with given parameters can be placed at given suballocItem.
4838  // If yes, fills pOffset and returns true. If no, returns false.
4839  bool CheckAllocation(
4840  uint32_t currentFrameIndex,
4841  uint32_t frameInUseCount,
4842  VkDeviceSize bufferImageGranularity,
4843  VkDeviceSize allocSize,
4844  VkDeviceSize allocAlignment,
4845  VmaSuballocationType allocType,
4846  VmaSuballocationList::const_iterator suballocItem,
4847  bool canMakeOtherLost,
4848  VkDeviceSize* pOffset,
4849  size_t* itemsToMakeLostCount,
4850  VkDeviceSize* pSumFreeSize,
4851  VkDeviceSize* pSumItemSize) const;
4852  // Given a free suballocation, merges it with the following one, which must also be free.
4853  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4854  // Releases given suballocation, making it free.
4855  // Merges it with adjacent free suballocations if applicable.
4856  // Returns iterator to new free suballocation at this place.
4857  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4858  // Given a free suballocation, inserts it into the sorted list
4859  // m_FreeSuballocationsBySize if it's large enough to qualify.
4860  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4861  // Given a free suballocation, removes it from the sorted list
4862  // m_FreeSuballocationsBySize if it's registered there.
4863  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4864 };
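Keeping `m_FreeSuballocationsBySize` sorted ascending is what makes best-fit cheap: the smallest free range that can hold a request is found with a single binary search. A simplified, sizes-only sketch, with the standard library standing in for `VmaVector` and `VmaBinaryFindFirstNotLess`:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Returns a pointer to the smallest free size that still fits allocSize,
// or nullptr if no free range is large enough.
uint64_t* FindBestFit(std::vector<uint64_t>& freeSizesAscending, uint64_t allocSize)
{
    std::vector<uint64_t>::iterator it = std::lower_bound(
        freeSizesAscending.begin(), freeSizesAscending.end(), allocSize);
    return (it != freeSizesAscending.end()) ? &*it : nullptr;
}
```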
4865 
4866 /*
4867 Allocations and their references in internal data structure look like this:
4868 
4869 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4870 
4871  0 +-------+
4872  | |
4873  | |
4874  | |
4875  +-------+
4876  | Alloc | 1st[m_1stNullItemsBeginCount]
4877  +-------+
4878  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4879  +-------+
4880  | ... |
4881  +-------+
4882  | Alloc | 1st[1st.size() - 1]
4883  +-------+
4884  | |
4885  | |
4886  | |
4887 GetSize() +-------+
4888 
4889 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4890 
4891  0 +-------+
4892  | Alloc | 2nd[0]
4893  +-------+
4894  | Alloc | 2nd[1]
4895  +-------+
4896  | ... |
4897  +-------+
4898  | Alloc | 2nd[2nd.size() - 1]
4899  +-------+
4900  | |
4901  | |
4902  | |
4903  +-------+
4904  | Alloc | 1st[m_1stNullItemsBeginCount]
4905  +-------+
4906  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4907  +-------+
4908  | ... |
4909  +-------+
4910  | Alloc | 1st[1st.size() - 1]
4911  +-------+
4912  | |
4913 GetSize() +-------+
4914 
4915 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4916 
4917  0 +-------+
4918  | |
4919  | |
4920  | |
4921  +-------+
4922  | Alloc | 1st[m_1stNullItemsBeginCount]
4923  +-------+
4924  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4925  +-------+
4926  | ... |
4927  +-------+
4928  | Alloc | 1st[1st.size() - 1]
4929  +-------+
4930  | |
4931  | |
4932  | |
4933  +-------+
4934  | Alloc | 2nd[2nd.size() - 1]
4935  +-------+
4936  | ... |
4937  +-------+
4938  | Alloc | 2nd[1]
4939  +-------+
4940  | Alloc | 2nd[0]
4941 GetSize() +-------+
4942 
4943 */
4944 class VmaBlockMetadata_Linear : public VmaBlockMetadata
4945 {
4946  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
4947 public:
4948  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
4949  virtual ~VmaBlockMetadata_Linear();
4950  virtual void Init(VkDeviceSize size);
4951 
4952  virtual bool Validate() const;
4953  virtual size_t GetAllocationCount() const;
4954  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4955  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4956  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
4957 
4958  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4959  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4960 
4961 #if VMA_STATS_STRING_ENABLED
4962  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4963 #endif
4964 
4965  virtual bool CreateAllocationRequest(
4966  uint32_t currentFrameIndex,
4967  uint32_t frameInUseCount,
4968  VkDeviceSize bufferImageGranularity,
4969  VkDeviceSize allocSize,
4970  VkDeviceSize allocAlignment,
4971  bool upperAddress,
4972  VmaSuballocationType allocType,
4973  bool canMakeOtherLost,
4974  uint32_t strategy,
4975  VmaAllocationRequest* pAllocationRequest);
4976 
4977  virtual bool MakeRequestedAllocationsLost(
4978  uint32_t currentFrameIndex,
4979  uint32_t frameInUseCount,
4980  VmaAllocationRequest* pAllocationRequest);
4981 
4982  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4983 
4984  virtual VkResult CheckCorruption(const void* pBlockData);
4985 
4986  virtual void Alloc(
4987  const VmaAllocationRequest& request,
4988  VmaSuballocationType type,
4989  VkDeviceSize allocSize,
4990  bool upperAddress,
4991  VmaAllocation hAllocation);
4992 
4993  virtual void Free(const VmaAllocation allocation);
4994  virtual void FreeAtOffset(VkDeviceSize offset);
4995 
4996 private:
4997  /*
4998  There are two suballocation vectors, used in ping-pong way.
4999  The one with index m_1stVectorIndex is called 1st.
5000  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5001  2nd can be non-empty only when 1st is not empty.
5002  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5003  */
5004  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5005 
5006  enum SECOND_VECTOR_MODE
5007  {
5008  SECOND_VECTOR_EMPTY,
5009  /*
5010  Suballocations in 2nd vector are created later than the ones in 1st, but they
5011  all have smaller offsets.
5012  */
5013  SECOND_VECTOR_RING_BUFFER,
5014  /*
5015  Suballocations in 2nd vector are upper side of double stack.
5016  They all have offsets higher than those in 1st vector.
5017  Top of this stack means smaller offsets, but higher indices in this vector.
5018  */
5019  SECOND_VECTOR_DOUBLE_STACK,
5020  };
5021 
5022  VkDeviceSize m_SumFreeSize;
5023  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5024  uint32_t m_1stVectorIndex;
5025  SECOND_VECTOR_MODE m_2ndVectorMode;
5026 
5027  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5028  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5029  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5030  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5031 
5032  // Number of items in 1st vector with hAllocation = null at the beginning.
5033  size_t m_1stNullItemsBeginCount;
5034  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5035  size_t m_1stNullItemsMiddleCount;
5036  // Number of items in 2nd vector with hAllocation = null.
5037  size_t m_2ndNullItemsCount;
5038 
5039  bool ShouldCompact1st() const;
5040  void CleanupAfterFree();
5041 };
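The `SECOND_VECTOR_RING_BUFFER` mode shown in the diagram and comments above amounts to classic ring-buffer placement: append after 1st while there is still room at the top of the block, otherwise wrap around into the space freed at the front, where the 2nd vector grows toward 1st. A simplified sketch that ignores alignment and granularity (hypothetical free function, not the library's actual code path):

```cpp
#include <cstdint>

bool ChooseRingBufferOffset(
    uint64_t blockSize,
    uint64_t firstVectorBegin, // offset of the first live allocation in 1st
    uint64_t firstVectorEnd,   // one past the last allocation in 1st
    uint64_t secondVectorEnd,  // one past the last allocation in 2nd; 0 if empty
    uint64_t allocSize,
    uint64_t* outOffset)
{
    if(firstVectorEnd + allocSize <= blockSize)
    {
        *outOffset = firstVectorEnd;  // still room at the top of the block
        return true;
    }
    if(secondVectorEnd + allocSize <= firstVectorBegin)
    {
        *outOffset = secondVectorEnd; // wrap around: 2nd grows toward 1st
        return true;
    }
    return false;                     // block is full for this request
}
```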
5042 
5043 /*
5044 - GetSize() is the original size of allocated memory block.
5045 - m_UsableSize is this size aligned down to a power of two.
5046  All allocations and calculations happen relative to m_UsableSize.
5047 - GetUnusableSize() is the difference between them.
5048  It is reported as a separate, unused range, not available for allocations.
5049 
5050 Node at level 0 has size = m_UsableSize.
5051 Each subsequent level contains nodes half the size of those on the previous level.
5052 m_LevelCount is the maximum number of levels to use in the current object.
5053 */
5054 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5055 {
5056  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5057 public:
5058  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5059  virtual ~VmaBlockMetadata_Buddy();
5060  virtual void Init(VkDeviceSize size);
5061 
5062  virtual bool Validate() const;
5063  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5064  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5065  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5066  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5067 
5068  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5069  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5070 
5071 #if VMA_STATS_STRING_ENABLED
5072  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5073 #endif
5074 
5075  virtual bool CreateAllocationRequest(
5076  uint32_t currentFrameIndex,
5077  uint32_t frameInUseCount,
5078  VkDeviceSize bufferImageGranularity,
5079  VkDeviceSize allocSize,
5080  VkDeviceSize allocAlignment,
5081  bool upperAddress,
5082  VmaSuballocationType allocType,
5083  bool canMakeOtherLost,
5084  uint32_t strategy,
5085  VmaAllocationRequest* pAllocationRequest);
5086 
5087  virtual bool MakeRequestedAllocationsLost(
5088  uint32_t currentFrameIndex,
5089  uint32_t frameInUseCount,
5090  VmaAllocationRequest* pAllocationRequest);
5091 
5092  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5093 
5094  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5095 
5096  virtual void Alloc(
5097  const VmaAllocationRequest& request,
5098  VmaSuballocationType type,
5099  VkDeviceSize allocSize,
5100  bool upperAddress,
5101  VmaAllocation hAllocation);
5102 
5103  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5104  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5105 
5106 private:
5107  static const VkDeviceSize MIN_NODE_SIZE = 32;
5108  static const size_t MAX_LEVELS = 30;
5109 
5110  struct ValidationContext
5111  {
5112  size_t calculatedAllocationCount;
5113  size_t calculatedFreeCount;
5114  VkDeviceSize calculatedSumFreeSize;
5115 
5116  ValidationContext() :
5117  calculatedAllocationCount(0),
5118  calculatedFreeCount(0),
5119  calculatedSumFreeSize(0) { }
5120  };
5121 
5122  struct Node
5123  {
5124  VkDeviceSize offset;
5125  enum TYPE
5126  {
5127  TYPE_FREE,
5128  TYPE_ALLOCATION,
5129  TYPE_SPLIT,
5130  TYPE_COUNT
5131  } type;
5132  Node* parent;
5133  Node* buddy;
5134 
5135  union
5136  {
5137  struct
5138  {
5139  Node* prev;
5140  Node* next;
5141  } free;
5142  struct
5143  {
5144  VmaAllocation alloc;
5145  } allocation;
5146  struct
5147  {
5148  Node* leftChild;
5149  } split;
5150  };
5151  };
5152 
5153  // Size of the memory block aligned down to a power of two.
5154  VkDeviceSize m_UsableSize;
5155  uint32_t m_LevelCount;
5156 
5157  Node* m_Root;
5158  struct {
5159  Node* front;
5160  Node* back;
5161  } m_FreeList[MAX_LEVELS];
5162  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5163  size_t m_AllocationCount;
5164  // Number of nodes in the tree with type == TYPE_FREE.
5165  size_t m_FreeCount;
5166  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5167  VkDeviceSize m_SumFreeSize;
5168 
5169  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5170  void DeleteNode(Node* node);
5171  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5172  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5173  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5174  // Alloc passed just for validation. Can be null.
5175  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5176  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5177  // Adds node to the front of FreeList at given level.
5178  // node->type must be FREE.
5179  // node->free.prev, next can be undefined.
5180  void AddToFreeListFront(uint32_t level, Node* node);
5181  // Removes node from FreeList at given level.
5182  // node->type must be FREE.
5183  // node->free.prev, next stay untouched.
5184  void RemoveFromFreeList(uint32_t level, Node* node);
5185 
5186 #if VMA_STATS_STRING_ENABLED
5187  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5188 #endif
5189 };
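The level arithmetic described before the class follows directly from the halving rule: level 0 spans the whole `m_UsableSize`, `LevelToNodeSize(level)` is simply `m_UsableSize >> level`, and the right level for a request is the deepest one whose nodes still fit it. A standalone sketch under those assumptions (hypothetical free functions):

```cpp
#include <cassert>
#include <cstdint>

uint64_t LevelToNodeSize(uint64_t usableSize, uint32_t level)
{
    return usableSize >> level; // each level halves the node size
}

// Deepest level whose node size is still >= allocSize. Assumes
// allocSize <= usableSize and a levelCount that keeps the smallest
// node at or above the minimum node size.
uint32_t AllocSizeToLevel(uint64_t usableSize, uint32_t levelCount, uint64_t allocSize)
{
    uint32_t level = 0;
    while(level + 1 < levelCount && (usableSize >> (level + 1)) >= allocSize)
        ++level;
    return level;
}

int main()
{
    // 1 MiB usable block: a 100 KiB request lands on the 128 KiB level (3).
    assert(AllocSizeToLevel(1048576, 16, 102400) == 3);
    assert(LevelToNodeSize(1048576, 3) == 131072);
}
```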
5190 
5191 /*
5192 Represents a single block of device memory (`VkDeviceMemory`) with all the
5193 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5194 
5195 Thread-safety: This class must be externally synchronized.
5196 */
5197 class VmaDeviceMemoryBlock
5198 {
5199  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5200 public:
5201  VmaBlockMetadata* m_pMetadata;
5202 
5203  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5204 
5205  ~VmaDeviceMemoryBlock()
5206  {
5207  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5208  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5209  }
5210 
5211  // Always call after construction.
5212  void Init(
5213  VmaAllocator hAllocator,
5214  uint32_t newMemoryTypeIndex,
5215  VkDeviceMemory newMemory,
5216  VkDeviceSize newSize,
5217  uint32_t id,
5218  uint32_t algorithm);
5219  // Always call before destruction.
5220  void Destroy(VmaAllocator allocator);
5221 
5222  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5223  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5224  uint32_t GetId() const { return m_Id; }
5225  void* GetMappedData() const { return m_pMappedData; }
5226 
5227  // Validates all data structures inside this object. If not valid, returns false.
5228  bool Validate() const;
5229 
5230  VkResult CheckCorruption(VmaAllocator hAllocator);
5231 
5232  // ppData can be null.
5233  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5234  void Unmap(VmaAllocator hAllocator, uint32_t count);
5235 
5236  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5237  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5238 
5239  VkResult BindBufferMemory(
5240  const VmaAllocator hAllocator,
5241  const VmaAllocation hAllocation,
5242  VkBuffer hBuffer);
5243  VkResult BindImageMemory(
5244  const VmaAllocator hAllocator,
5245  const VmaAllocation hAllocation,
5246  VkImage hImage);
5247 
5248 private:
5249  uint32_t m_MemoryTypeIndex;
5250  uint32_t m_Id;
5251  VkDeviceMemory m_hMemory;
5252 
5253  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5254  // Also protects m_MapCount, m_pMappedData.
5255  VMA_MUTEX m_Mutex;
5256  uint32_t m_MapCount;
5257  void* m_pMappedData;
5258 };
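`Map()`/`Unmap()` above take a count and maintain `m_MapCount` under `m_Mutex`, so several allocations within one block can share a single mapping of the `VkDeviceMemory`. A simplified standalone sketch of that reference-counting pattern, with `std::mutex` and a static buffer standing in for `VMA_MUTEX` and the real `vkMapMemory`/`vkUnmapMemory` calls:

```cpp
#include <cstdint>
#include <mutex>

struct MappedBlockSketch
{
    std::mutex mutex;
    uint32_t mapCount = 0;
    void* mappedData = nullptr;

    // Stand-ins for vkMapMemory / vkUnmapMemory on the block's VkDeviceMemory.
    void* PhysicalMap() { static char storage[256]; return storage; }
    void PhysicalUnmap() { }

    void* Map()
    {
        std::lock_guard<std::mutex> lock(mutex);
        if(mapCount++ == 0)
            mappedData = PhysicalMap(); // first user actually maps
        return mappedData;
    }
    void Unmap()
    {
        std::lock_guard<std::mutex> lock(mutex);
        if(--mapCount == 0)
        {
            PhysicalUnmap();            // last user actually unmaps
            mappedData = nullptr;
        }
    }
};
```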
5259 
5260 struct VmaPointerLess
5261 {
5262  bool operator()(const void* lhs, const void* rhs) const
5263  {
5264  return lhs < rhs;
5265  }
5266 };
5267 
5268 class VmaDefragmentator;
5269 
5270 /*
5271 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5272 Vulkan memory type.
5273 
5274 Synchronized internally with a mutex.
5275 */
5276 struct VmaBlockVector
5277 {
5278  VMA_CLASS_NO_COPY(VmaBlockVector)
5279 public:
5280  VmaBlockVector(
5281  VmaAllocator hAllocator,
5282  uint32_t memoryTypeIndex,
5283  VkDeviceSize preferredBlockSize,
5284  size_t minBlockCount,
5285  size_t maxBlockCount,
5286  VkDeviceSize bufferImageGranularity,
5287  uint32_t frameInUseCount,
5288  bool isCustomPool,
5289  bool explicitBlockSize,
5290  uint32_t algorithm);
5291  ~VmaBlockVector();
5292 
5293  VkResult CreateMinBlocks();
5294 
5295  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5296  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5297  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5298  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5299  uint32_t GetAlgorithm() const { return m_Algorithm; }
5300 
5301  void GetPoolStats(VmaPoolStats* pStats);
5302 
5303  bool IsEmpty() const { return m_Blocks.empty(); }
5304  bool IsCorruptionDetectionEnabled() const;
5305 
5306  VkResult Allocate(
5307  VmaPool hCurrentPool,
5308  uint32_t currentFrameIndex,
5309  VkDeviceSize size,
5310  VkDeviceSize alignment,
5311  const VmaAllocationCreateInfo& createInfo,
5312  VmaSuballocationType suballocType,
5313  VmaAllocation* pAllocation);
5314 
5315  void Free(
5316  VmaAllocation hAllocation);
5317 
5318  // Adds statistics of this BlockVector to pStats.
5319  void AddStats(VmaStats* pStats);
5320 
5321 #if VMA_STATS_STRING_ENABLED
5322  void PrintDetailedMap(class VmaJsonWriter& json);
5323 #endif
5324 
5325  void MakePoolAllocationsLost(
5326  uint32_t currentFrameIndex,
5327  size_t* pLostAllocationCount);
5328  VkResult CheckCorruption();
5329 
5330  VmaDefragmentator* EnsureDefragmentator(
5331  VmaAllocator hAllocator,
5332  uint32_t currentFrameIndex);
5333 
5334  VkResult Defragment(
5335  VmaDefragmentationStats* pDefragmentationStats,
5336  VkDeviceSize& maxBytesToMove,
5337  uint32_t& maxAllocationsToMove);
5338 
5339  void DestroyDefragmentator();
5340 
5341 private:
5342  friend class VmaDefragmentator;
5343 
5344  const VmaAllocator m_hAllocator;
5345  const uint32_t m_MemoryTypeIndex;
5346  const VkDeviceSize m_PreferredBlockSize;
5347  const size_t m_MinBlockCount;
5348  const size_t m_MaxBlockCount;
5349  const VkDeviceSize m_BufferImageGranularity;
5350  const uint32_t m_FrameInUseCount;
5351  const bool m_IsCustomPool;
5352  const bool m_ExplicitBlockSize;
5353  const uint32_t m_Algorithm;
5354  bool m_HasEmptyBlock;
5355  VMA_MUTEX m_Mutex;
5356  // Incrementally sorted by sumFreeSize, ascending.
5357  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5358  /* There can be at most one block that is completely empty (tracked by
5359  m_HasEmptyBlock) - a hysteresis to avoid the pessimistic case of alternating
5360  creation and destruction of a whole VkDeviceMemory. */
5361  VmaDefragmentator* m_pDefragmentator;
5362  uint32_t m_NextBlockId;
5363 
5364  VkDeviceSize CalcMaxBlockSize() const;
5365 
5366  // Finds and removes given block from vector.
5367  void Remove(VmaDeviceMemoryBlock* pBlock);
5368 
5369  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5370  // after this call.
5371  void IncrementallySortBlocks();
5372 
5373  // To be used only without CAN_MAKE_OTHER_LOST flag.
5374  VkResult AllocateFromBlock(
5375  VmaDeviceMemoryBlock* pBlock,
5376  VmaPool hCurrentPool,
5377  uint32_t currentFrameIndex,
5378  VkDeviceSize size,
5379  VkDeviceSize alignment,
5380  VmaAllocationCreateFlags allocFlags,
5381  void* pUserData,
5382  VmaSuballocationType suballocType,
5383  uint32_t strategy,
5384  VmaAllocation* pAllocation);
5385 
5386  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5387 };
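`IncrementallySortBlocks()` trades strict ordering for throughput: rather than fully sorting `m_Blocks` on every allocation, each call performs at most one adjacent swap, so the vector converges toward ascending free size over successive calls. A standalone sketch of that single-step pass (hypothetical free function over plain sizes):

```cpp
#include <cstdint>
#include <utility>
#include <vector>

void IncrementallySortAscending(std::vector<uint64_t>& freeSizes)
{
    for(size_t i = 1; i < freeSizes.size(); ++i)
    {
        if(freeSizes[i - 1] > freeSizes[i])
        {
            std::swap(freeSizes[i - 1], freeSizes[i]);
            return; // at most one swap per call; later calls continue the work
        }
    }
}
```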
5388 
5389 struct VmaPool_T
5390 {
5391  VMA_CLASS_NO_COPY(VmaPool_T)
5392 public:
5393  VmaBlockVector m_BlockVector;
5394 
5395  VmaPool_T(
5396  VmaAllocator hAllocator,
5397  const VmaPoolCreateInfo& createInfo,
5398  VkDeviceSize preferredBlockSize);
5399  ~VmaPool_T();
5400 
5401  uint32_t GetId() const { return m_Id; }
5402  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5403 
5404 #if VMA_STATS_STRING_ENABLED
5405  //void PrintDetailedMap(class VmaStringBuilder& sb);
5406 #endif
5407 
5408 private:
5409  uint32_t m_Id;
5410 };
5411 
5412 class VmaDefragmentator
5413 {
5414  VMA_CLASS_NO_COPY(VmaDefragmentator)
5415 private:
5416  const VmaAllocator m_hAllocator;
5417  VmaBlockVector* const m_pBlockVector;
5418  uint32_t m_CurrentFrameIndex;
5419  VkDeviceSize m_BytesMoved;
5420  uint32_t m_AllocationsMoved;
5421 
5422  struct AllocationInfo
5423  {
5424  VmaAllocation m_hAllocation;
5425  VkBool32* m_pChanged;
5426 
5427  AllocationInfo() :
5428  m_hAllocation(VK_NULL_HANDLE),
5429  m_pChanged(VMA_NULL)
5430  {
5431  }
5432  };
5433 
5434  struct AllocationInfoSizeGreater
5435  {
5436  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5437  {
5438  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5439  }
5440  };
5441 
5442  // Used between AddAllocation and Defragment.
5443  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5444 
5445  struct BlockInfo
5446  {
5447  VmaDeviceMemoryBlock* m_pBlock;
5448  bool m_HasNonMovableAllocations;
5449  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5450 
5451  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5452  m_pBlock(VMA_NULL),
5453  m_HasNonMovableAllocations(true),
5454  m_Allocations(pAllocationCallbacks),
5455  m_pMappedDataForDefragmentation(VMA_NULL)
5456  {
5457  }
5458 
5459  void CalcHasNonMovableAllocations()
5460  {
5461  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5462  const size_t defragmentAllocCount = m_Allocations.size();
5463  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5464  }
5465 
5466  void SortAllocationsBySizeDescecnding()
5467  {
5468  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5469  }
5470 
5471  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
5472  void Unmap(VmaAllocator hAllocator);
5473 
5474  private:
5475  // Not null if mapped for defragmentation only, not originally mapped.
5476  void* m_pMappedDataForDefragmentation;
5477  };
5478 
5479  struct BlockPointerLess
5480  {
5481  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5482  {
5483  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5484  }
5485  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5486  {
5487  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5488  }
5489  };
5490 
5491  // 1. Blocks with some non-movable allocations go first.
5492  // 2. Blocks with smaller sumFreeSize go first.
5493  struct BlockInfoCompareMoveDestination
5494  {
5495  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5496  {
5497  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5498  {
5499  return true;
5500  }
5501  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
5502  {
5503  return false;
5504  }
5505  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
5506  {
5507  return true;
5508  }
5509  return false;
5510  }
5511  };
5512 
5513  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
5514  BlockInfoVector m_Blocks;
5515 
5516  VkResult DefragmentRound(
5517  VkDeviceSize maxBytesToMove,
5518  uint32_t maxAllocationsToMove);
5519 
5520  static bool MoveMakesSense(
5521  size_t dstBlockIndex, VkDeviceSize dstOffset,
5522  size_t srcBlockIndex, VkDeviceSize srcOffset);
5523 
5524 public:
5525  VmaDefragmentator(
5526  VmaAllocator hAllocator,
5527  VmaBlockVector* pBlockVector,
5528  uint32_t currentFrameIndex);
5529 
5530  ~VmaDefragmentator();
5531 
5532  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5533  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5534 
5535  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5536 
5537  VkResult Defragment(
5538  VkDeviceSize maxBytesToMove,
5539  uint32_t maxAllocationsToMove);
5540 };
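The `BlockInfoCompareMoveDestination` comparator above encodes the defragmentation heuristic: fill blocks that can never become empty first (they hold non-movable allocations), and among the rest prefer the tightest block, so roomy blocks have the best chance of draining completely and being destroyed. A standalone illustration with `std::sort` over a simplified struct:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

struct BlockSketch { bool hasNonMovable; uint64_t sumFreeSize; };

int main()
{
    std::vector<BlockSketch> blocks = { {false, 4096}, {true, 8192}, {false, 1024} };
    std::sort(blocks.begin(), blocks.end(),
        [](const BlockSketch& lhs, const BlockSketch& rhs)
        {
            if(lhs.hasNonMovable != rhs.hasNonMovable)
                return lhs.hasNonMovable;             // non-movable blocks first
            return lhs.sumFreeSize < rhs.sumFreeSize; // then tighter blocks
        });
    // Resulting destination order: {true, 8192}, {false, 1024}, {false, 4096}.
}
```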
5541 
5542 #if VMA_RECORDING_ENABLED
5543 
5544 class VmaRecorder
5545 {
5546 public:
5547  VmaRecorder();
5548  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
5549  void WriteConfiguration(
5550  const VkPhysicalDeviceProperties& devProps,
5551  const VkPhysicalDeviceMemoryProperties& memProps,
5552  bool dedicatedAllocationExtensionEnabled);
5553  ~VmaRecorder();
5554 
5555  void RecordCreateAllocator(uint32_t frameIndex);
5556  void RecordDestroyAllocator(uint32_t frameIndex);
5557  void RecordCreatePool(uint32_t frameIndex,
5558  const VmaPoolCreateInfo& createInfo,
5559  VmaPool pool);
5560  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
5561  void RecordAllocateMemory(uint32_t frameIndex,
5562  const VkMemoryRequirements& vkMemReq,
5563  const VmaAllocationCreateInfo& createInfo,
5564  VmaAllocation allocation);
5565  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
5566  const VkMemoryRequirements& vkMemReq,
5567  bool requiresDedicatedAllocation,
5568  bool prefersDedicatedAllocation,
5569  const VmaAllocationCreateInfo& createInfo,
5570  VmaAllocation allocation);
5571  void RecordAllocateMemoryForImage(uint32_t frameIndex,
5572  const VkMemoryRequirements& vkMemReq,
5573  bool requiresDedicatedAllocation,
5574  bool prefersDedicatedAllocation,
5575  const VmaAllocationCreateInfo& createInfo,
5576  VmaAllocation allocation);
5577  void RecordFreeMemory(uint32_t frameIndex,
5578  VmaAllocation allocation);
5579  void RecordSetAllocationUserData(uint32_t frameIndex,
5580  VmaAllocation allocation,
5581  const void* pUserData);
5582  void RecordCreateLostAllocation(uint32_t frameIndex,
5583  VmaAllocation allocation);
5584  void RecordMapMemory(uint32_t frameIndex,
5585  VmaAllocation allocation);
5586  void RecordUnmapMemory(uint32_t frameIndex,
5587  VmaAllocation allocation);
5588  void RecordFlushAllocation(uint32_t frameIndex,
5589  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5590  void RecordInvalidateAllocation(uint32_t frameIndex,
5591  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5592  void RecordCreateBuffer(uint32_t frameIndex,
5593  const VkBufferCreateInfo& bufCreateInfo,
5594  const VmaAllocationCreateInfo& allocCreateInfo,
5595  VmaAllocation allocation);
5596  void RecordCreateImage(uint32_t frameIndex,
5597  const VkImageCreateInfo& imageCreateInfo,
5598  const VmaAllocationCreateInfo& allocCreateInfo,
5599  VmaAllocation allocation);
5600  void RecordDestroyBuffer(uint32_t frameIndex,
5601  VmaAllocation allocation);
5602  void RecordDestroyImage(uint32_t frameIndex,
5603  VmaAllocation allocation);
5604  void RecordTouchAllocation(uint32_t frameIndex,
5605  VmaAllocation allocation);
5606  void RecordGetAllocationInfo(uint32_t frameIndex,
5607  VmaAllocation allocation);
5608  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
5609  VmaPool pool);
5610 
5611 private:
5612  struct CallParams
5613  {
5614  uint32_t threadId;
5615  double time;
5616  };
5617 
5618  class UserDataString
5619  {
5620  public:
5621  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
5622  const char* GetString() const { return m_Str; }
5623 
5624  private:
5625  char m_PtrStr[17];
5626  const char* m_Str;
5627  };
5628 
5629  bool m_UseMutex;
5630  VmaRecordFlags m_Flags;
5631  FILE* m_File;
5632  VMA_MUTEX m_FileMutex;
5633  int64_t m_Freq;
5634  int64_t m_StartCounter;
5635 
5636  void GetBasicParams(CallParams& outParams);
5637  void Flush();
5638 };
5639 
5640 #endif // #if VMA_RECORDING_ENABLED
5641 
5642 // Main allocator object.
5643 struct VmaAllocator_T
5644 {
5645  VMA_CLASS_NO_COPY(VmaAllocator_T)
5646 public:
5647  bool m_UseMutex;
5648  bool m_UseKhrDedicatedAllocation;
5649  VkDevice m_hDevice;
5650  bool m_AllocationCallbacksSpecified;
5651  VkAllocationCallbacks m_AllocationCallbacks;
5652  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5653 
5654  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
5655  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5656  VMA_MUTEX m_HeapSizeLimitMutex;
5657 
5658  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5659  VkPhysicalDeviceMemoryProperties m_MemProps;
5660 
5661  // Default pools.
5662  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5663 
5664  // Each vector is sorted by memory (handle value).
5665  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5666  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5667  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5668 
5669  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5670  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5671  ~VmaAllocator_T();
5672 
5673  const VkAllocationCallbacks* GetAllocationCallbacks() const
5674  {
5675  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5676  }
5677  const VmaVulkanFunctions& GetVulkanFunctions() const
5678  {
5679  return m_VulkanFunctions;
5680  }
5681 
5682  VkDeviceSize GetBufferImageGranularity() const
5683  {
5684  return VMA_MAX(
5685  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5686  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5687  }
5688 
5689  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5690  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5691 
5692  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5693  {
5694  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5695  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5696  }
5697  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5698  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5699  {
5700  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5701  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5702  }
5703  // Minimum alignment for all allocations in specific memory type.
5704  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5705  {
5706  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5707  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5708  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5709  }
5710 
5711  bool IsIntegratedGpu() const
5712  {
5713  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5714  }
5715 
5716 #if VMA_RECORDING_ENABLED
5717  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5718 #endif
5719 
5720  void GetBufferMemoryRequirements(
5721  VkBuffer hBuffer,
5722  VkMemoryRequirements& memReq,
5723  bool& requiresDedicatedAllocation,
5724  bool& prefersDedicatedAllocation) const;
5725  void GetImageMemoryRequirements(
5726  VkImage hImage,
5727  VkMemoryRequirements& memReq,
5728  bool& requiresDedicatedAllocation,
5729  bool& prefersDedicatedAllocation) const;
5730 
5731  // Main allocation function.
5732  VkResult AllocateMemory(
5733  const VkMemoryRequirements& vkMemReq,
5734  bool requiresDedicatedAllocation,
5735  bool prefersDedicatedAllocation,
5736  VkBuffer dedicatedBuffer,
5737  VkImage dedicatedImage,
5738  const VmaAllocationCreateInfo& createInfo,
5739  VmaSuballocationType suballocType,
5740  VmaAllocation* pAllocation);
5741 
5742  // Main deallocation function.
5743  void FreeMemory(const VmaAllocation allocation);
5744 
5745  void CalculateStats(VmaStats* pStats);
5746 
5747 #if VMA_STATS_STRING_ENABLED
5748  void PrintDetailedMap(class VmaJsonWriter& json);
5749 #endif
5750 
5751  VkResult Defragment(
5752  VmaAllocation* pAllocations,
5753  size_t allocationCount,
5754  VkBool32* pAllocationsChanged,
5755  const VmaDefragmentationInfo* pDefragmentationInfo,
5756  VmaDefragmentationStats* pDefragmentationStats);
5757 
5758  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5759  bool TouchAllocation(VmaAllocation hAllocation);
5760 
5761  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5762  void DestroyPool(VmaPool pool);
5763  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5764 
5765  void SetCurrentFrameIndex(uint32_t frameIndex);
5766  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5767 
5768  void MakePoolAllocationsLost(
5769  VmaPool hPool,
5770  size_t* pLostAllocationCount);
5771  VkResult CheckPoolCorruption(VmaPool hPool);
5772  VkResult CheckCorruption(uint32_t memoryTypeBits);
5773 
5774  void CreateLostAllocation(VmaAllocation* pAllocation);
5775 
5776  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5777  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5778 
5779  VkResult Map(VmaAllocation hAllocation, void** ppData);
5780  void Unmap(VmaAllocation hAllocation);
5781 
5782  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5783  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5784 
5785  void FlushOrInvalidateAllocation(
5786  VmaAllocation hAllocation,
5787  VkDeviceSize offset, VkDeviceSize size,
5788  VMA_CACHE_OPERATION op);
5789 
5790  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5791 
5792 private:
5793  VkDeviceSize m_PreferredLargeHeapBlockSize;
5794 
5795  VkPhysicalDevice m_PhysicalDevice;
5796  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5797 
5798  VMA_MUTEX m_PoolsMutex;
5799  // Protected by m_PoolsMutex. Sorted by pointer value.
5800  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5801  uint32_t m_NextPoolId;
5802 
5803  VmaVulkanFunctions m_VulkanFunctions;
5804 
5805 #if VMA_RECORDING_ENABLED
5806  VmaRecorder* m_pRecorder;
5807 #endif
5808 
5809  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5810 
5811  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5812 
5813  VkResult AllocateMemoryOfType(
5814  VkDeviceSize size,
5815  VkDeviceSize alignment,
5816  bool dedicatedAllocation,
5817  VkBuffer dedicatedBuffer,
5818  VkImage dedicatedImage,
5819  const VmaAllocationCreateInfo& createInfo,
5820  uint32_t memTypeIndex,
5821  VmaSuballocationType suballocType,
5822  VmaAllocation* pAllocation);
5823 
5824  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
5825  VkResult AllocateDedicatedMemory(
5826  VkDeviceSize size,
5827  VmaSuballocationType suballocType,
5828  uint32_t memTypeIndex,
5829  bool map,
5830  bool isUserDataString,
5831  void* pUserData,
5832  VkBuffer dedicatedBuffer,
5833  VkImage dedicatedImage,
5834  VmaAllocation* pAllocation);
5835 
5836  // Frees given allocation created as dedicated memory - one with its own VkDeviceMemory.
5837  void FreeDedicatedMemory(VmaAllocation allocation);
5838 };
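`IsMemoryTypeNonCoherent()` and `GetMemoryTypeMinAlignment()` exist because, on HOST_VISIBLE but non-HOST_COHERENT memory, ranges passed to `vkFlushMappedMemoryRanges`/`vkInvalidateMappedMemoryRanges` must respect `VkPhysicalDeviceLimits::nonCoherentAtomSize`. A sketch of the range-expansion rule that `FlushOrInvalidateAllocation()` has to apply (hypothetical free function; atom size is a power of two per the Vulkan spec):

```cpp
#include <cassert>
#include <cstdint>

void AlignFlushRange(
    uint64_t nonCoherentAtomSize,     // power of two
    uint64_t offset, uint64_t size,
    uint64_t* outOffset, uint64_t* outSize)
{
    const uint64_t mask = nonCoherentAtomSize - 1;
    *outOffset = offset & ~mask;                         // round start down
    const uint64_t end = (offset + size + mask) & ~mask; // round end up
    *outSize = end - *outOffset;
}

int main()
{
    uint64_t off, sz;
    AlignFlushRange(64, 100, 8, &off, &sz); // dirty bytes [100, 108)
    assert(off == 64 && sz == 64);          // expanded to the atom [64, 128)
}
```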
5839 
5840 ////////////////////////////////////////////////////////////////////////////////
5841 // Memory allocation #2 after VmaAllocator_T definition
5842 
5843 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5844 {
5845  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5846 }
5847 
5848 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5849 {
5850  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5851 }
5852 
5853 template<typename T>
5854 static T* VmaAllocate(VmaAllocator hAllocator)
5855 {
5856  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5857 }
5858 
5859 template<typename T>
5860 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5861 {
5862  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5863 }
5864 
5865 template<typename T>
5866 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5867 {
5868  if(ptr != VMA_NULL)
5869  {
5870  ptr->~T();
5871  VmaFree(hAllocator, ptr);
5872  }
5873 }
5874 
5875 template<typename T>
5876 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5877 {
5878  if(ptr != VMA_NULL)
5879  {
5880  for(size_t i = count; i--; )
5881  ptr[i].~T();
5882  VmaFree(hAllocator, ptr);
5883  }
5884 }
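`VmaAllocate<T>()` only reserves raw, correctly aligned storage through the allocator's callbacks; it does not run constructors. A hypothetical usage sketch pairing it with placement new and the `vma_delete()` helper above (requires `<new>`; the example type is made up for illustration):

```cpp
#include <new>

struct Example
{
    int value;
    explicit Example(int v) : value(v) { }
};

static void ExampleUsage(VmaAllocator hAllocator)
{
    Example* p = new(VmaAllocate<Example>(hAllocator)) Example(42); // construct in place
    vma_delete(hAllocator, p); // runs ~Example(), then VmaFree()
}
```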
5885 
5886 ////////////////////////////////////////////////////////////////////////////////
5887 // VmaStringBuilder
5888 
5889 #if VMA_STATS_STRING_ENABLED
5890 
5891 class VmaStringBuilder
5892 {
5893 public:
5894  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
5895  size_t GetLength() const { return m_Data.size(); }
5896  const char* GetData() const { return m_Data.data(); }
5897 
5898  void Add(char ch) { m_Data.push_back(ch); }
5899  void Add(const char* pStr);
5900  void AddNewLine() { Add('\n'); }
5901  void AddNumber(uint32_t num);
5902  void AddNumber(uint64_t num);
5903  void AddPointer(const void* ptr);
5904 
5905 private:
5906  VmaVector< char, VmaStlAllocator<char> > m_Data;
5907 };
5908 
5909 void VmaStringBuilder::Add(const char* pStr)
5910 {
5911  const size_t strLen = strlen(pStr);
5912  if(strLen > 0)
5913  {
5914  const size_t oldCount = m_Data.size();
5915  m_Data.resize(oldCount + strLen);
5916  memcpy(m_Data.data() + oldCount, pStr, strLen);
5917  }
5918 }
5919 
5920 void VmaStringBuilder::AddNumber(uint32_t num)
5921 {
5922  char buf[11];
5923  VmaUint32ToStr(buf, sizeof(buf), num);
5924  Add(buf);
5925 }
5926 
5927 void VmaStringBuilder::AddNumber(uint64_t num)
5928 {
5929  char buf[21];
5930  VmaUint64ToStr(buf, sizeof(buf), num);
5931  Add(buf);
5932 }
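The local buffer sizes above are exact rather than arbitrary: UINT32_MAX = 4294967295 has 10 decimal digits and UINT64_MAX = 18446744073709551615 has 20, so 11 and 21 bytes respectively leave exactly one byte for the terminating NUL. A compile-time check of the same arithmetic:

```cpp
#include <cstdint>
#include <limits>

static_assert(std::numeric_limits<uint32_t>::max() == 4294967295u,
    "uint32_t max has 10 decimal digits -> 11-byte buffer");
static_assert(std::numeric_limits<uint64_t>::max() == 18446744073709551615ull,
    "uint64_t max has 20 decimal digits -> 21-byte buffer");
```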
5933 
5934 void VmaStringBuilder::AddPointer(const void* ptr)
5935 {
5936  char buf[21];
5937  VmaPtrToStr(buf, sizeof(buf), ptr);
5938  Add(buf);
5939 }
5940 
5941 #endif // #if VMA_STATS_STRING_ENABLED
5942 
5943 ////////////////////////////////////////////////////////////////////////////////
5944 // VmaJsonWriter
5945 
5946 #if VMA_STATS_STRING_ENABLED
5947 
5948 class VmaJsonWriter
5949 {
5950  VMA_CLASS_NO_COPY(VmaJsonWriter)
5951 public:
5952  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
5953  ~VmaJsonWriter();
5954 
5955  void BeginObject(bool singleLine = false);
5956  void EndObject();
5957 
5958  void BeginArray(bool singleLine = false);
5959  void EndArray();
5960 
5961  void WriteString(const char* pStr);
5962  void BeginString(const char* pStr = VMA_NULL);
5963  void ContinueString(const char* pStr);
5964  void ContinueString(uint32_t n);
5965  void ContinueString(uint64_t n);
5966  void ContinueString_Pointer(const void* ptr);
5967  void EndString(const char* pStr = VMA_NULL);
5968 
5969  void WriteNumber(uint32_t n);
5970  void WriteNumber(uint64_t n);
5971  void WriteBool(bool b);
5972  void WriteNull();
5973 
5974 private:
5975  static const char* const INDENT;
5976 
5977  enum COLLECTION_TYPE
5978  {
5979  COLLECTION_TYPE_OBJECT,
5980  COLLECTION_TYPE_ARRAY,
5981  };
5982  struct StackItem
5983  {
5984  COLLECTION_TYPE type;
5985  uint32_t valueCount;
5986  bool singleLineMode;
5987  };
5988 
5989  VmaStringBuilder& m_SB;
5990  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
5991  bool m_InsideString;
5992 
5993  void BeginValue(bool isString);
5994  void WriteIndent(bool oneLess = false);
5995 };
5996 
5997 const char* const VmaJsonWriter::INDENT = " ";
5998 
5999 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6000  m_SB(sb),
6001  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6002  m_InsideString(false)
6003 {
6004 }
6005 
6006 VmaJsonWriter::~VmaJsonWriter()
6007 {
6008  VMA_ASSERT(!m_InsideString);
6009  VMA_ASSERT(m_Stack.empty());
6010 }
6011 
6012 void VmaJsonWriter::BeginObject(bool singleLine)
6013 {
6014  VMA_ASSERT(!m_InsideString);
6015 
6016  BeginValue(false);
6017  m_SB.Add('{');
6018 
6019  StackItem item;
6020  item.type = COLLECTION_TYPE_OBJECT;
6021  item.valueCount = 0;
6022  item.singleLineMode = singleLine;
6023  m_Stack.push_back(item);
6024 }
6025 
6026 void VmaJsonWriter::EndObject()
6027 {
6028  VMA_ASSERT(!m_InsideString);
6029 
6030  WriteIndent(true);
6031  m_SB.Add('}');
6032 
6033  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6034  m_Stack.pop_back();
6035 }
6036 
6037 void VmaJsonWriter::BeginArray(bool singleLine)
6038 {
6039  VMA_ASSERT(!m_InsideString);
6040 
6041  BeginValue(false);
6042  m_SB.Add('[');
6043 
6044  StackItem item;
6045  item.type = COLLECTION_TYPE_ARRAY;
6046  item.valueCount = 0;
6047  item.singleLineMode = singleLine;
6048  m_Stack.push_back(item);
6049 }
6050 
6051 void VmaJsonWriter::EndArray()
6052 {
6053  VMA_ASSERT(!m_InsideString);
6054 
6055  WriteIndent(true);
6056  m_SB.Add(']');
6057 
6058  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6059  m_Stack.pop_back();
6060 }
6061 
6062 void VmaJsonWriter::WriteString(const char* pStr)
6063 {
6064  BeginString(pStr);
6065  EndString();
6066 }
6067 
6068 void VmaJsonWriter::BeginString(const char* pStr)
6069 {
6070  VMA_ASSERT(!m_InsideString);
6071 
6072  BeginValue(true);
6073  m_SB.Add('"');
6074  m_InsideString = true;
6075  if(pStr != VMA_NULL && pStr[0] != '\0')
6076  {
6077  ContinueString(pStr);
6078  }
6079 }
6080 
6081 void VmaJsonWriter::ContinueString(const char* pStr)
6082 {
6083  VMA_ASSERT(m_InsideString);
6084 
6085  const size_t strLen = strlen(pStr);
6086  for(size_t i = 0; i < strLen; ++i)
6087  {
6088  char ch = pStr[i];
6089  if(ch == '\\')
6090  {
6091  m_SB.Add("\\\\");
6092  }
6093  else if(ch == '"')
6094  {
6095  m_SB.Add("\\\"");
6096  }
6097  else if(ch >= 32)
6098  {
6099  m_SB.Add(ch);
6100  }
6101  else switch(ch)
6102  {
6103  case '\b':
6104  m_SB.Add("\\b");
6105  break;
6106  case '\f':
6107  m_SB.Add("\\f");
6108  break;
6109  case '\n':
6110  m_SB.Add("\\n");
6111  break;
6112  case '\r':
6113  m_SB.Add("\\r");
6114  break;
6115  case '\t':
6116  m_SB.Add("\\t");
6117  break;
6118  default:
6119  VMA_ASSERT(0 && "Character not currently supported.");
6120  break;
6121  }
6122  }
6123 }
6124 
6125 void VmaJsonWriter::ContinueString(uint32_t n)
6126 {
6127  VMA_ASSERT(m_InsideString);
6128  m_SB.AddNumber(n);
6129 }
6130 
6131 void VmaJsonWriter::ContinueString(uint64_t n)
6132 {
6133  VMA_ASSERT(m_InsideString);
6134  m_SB.AddNumber(n);
6135 }
6136 
6137 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6138 {
6139  VMA_ASSERT(m_InsideString);
6140  m_SB.AddPointer(ptr);
6141 }
6142 
6143 void VmaJsonWriter::EndString(const char* pStr)
6144 {
6145  VMA_ASSERT(m_InsideString);
6146  if(pStr != VMA_NULL && pStr[0] != '\0')
6147  {
6148  ContinueString(pStr);
6149  }
6150  m_SB.Add('"');
6151  m_InsideString = false;
6152 }
6153 
6154 void VmaJsonWriter::WriteNumber(uint32_t n)
6155 {
6156  VMA_ASSERT(!m_InsideString);
6157  BeginValue(false);
6158  m_SB.AddNumber(n);
6159 }
6160 
6161 void VmaJsonWriter::WriteNumber(uint64_t n)
6162 {
6163  VMA_ASSERT(!m_InsideString);
6164  BeginValue(false);
6165  m_SB.AddNumber(n);
6166 }
6167 
6168 void VmaJsonWriter::WriteBool(bool b)
6169 {
6170  VMA_ASSERT(!m_InsideString);
6171  BeginValue(false);
6172  m_SB.Add(b ? "true" : "false");
6173 }
6174 
6175 void VmaJsonWriter::WriteNull()
6176 {
6177  VMA_ASSERT(!m_InsideString);
6178  BeginValue(false);
6179  m_SB.Add("null");
6180 }
6181 
6182 void VmaJsonWriter::BeginValue(bool isString)
6183 {
6184  if(!m_Stack.empty())
6185  {
6186  StackItem& currItem = m_Stack.back();
6187  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6188  currItem.valueCount % 2 == 0)
6189  {
6190  VMA_ASSERT(isString);
6191  }
6192 
6193  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6194  currItem.valueCount % 2 != 0)
6195  {
6196  m_SB.Add(": ");
6197  }
6198  else if(currItem.valueCount > 0)
6199  {
6200  m_SB.Add(", ");
6201  WriteIndent();
6202  }
6203  else
6204  {
6205  WriteIndent();
6206  }
6207  ++currItem.valueCount;
6208  }
6209 }
6210 
6211 void VmaJsonWriter::WriteIndent(bool oneLess)
6212 {
6213  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6214  {
6215  m_SB.AddNewLine();
6216 
6217  size_t count = m_Stack.size();
6218  if(count > 0 && oneLess)
6219  {
6220  --count;
6221  }
6222  for(size_t i = 0; i < count; ++i)
6223  {
6224  m_SB.Add(INDENT);
6225  }
6226  }
6227 }
6228 
6229 #endif // #if VMA_STATS_STRING_ENABLED
6230 
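// Example (sketch, assuming VMA_STATS_STRING_ENABLED): VmaJsonWriter enforces
// key/value alternation via the valueCount parity check in BeginValue(), so each
// value inside an object must be preceded by a string key.
#if 0
static void ExampleJsonWriter(VmaAllocator allocator)
{
    VmaStringBuilder sb(allocator);
    VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    json.BeginObject();
    json.WriteString("Enabled");   // key (even valueCount - must be a string)
    json.WriteBool(true);          // value
    json.WriteString("Sizes");
    json.BeginArray(true);         // single-line array
    json.WriteNumber(256u);
    json.WriteNumber(1024u);
    json.EndArray();
    json.EndObject();
    // sb now holds, with indentation per INDENT:
    // {
    //   "Enabled": true,
    //   "Sizes": [256, 1024]
    // }
}
#endif
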
6232 
6233 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6234 {
6235  if(IsUserDataString())
6236  {
6237  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6238 
6239  FreeUserDataString(hAllocator);
6240 
6241  if(pUserData != VMA_NULL)
6242  {
6243  const char* const newStrSrc = (char*)pUserData;
6244  const size_t newStrLen = strlen(newStrSrc);
6245  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6246  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6247  m_pUserData = newStrDst;
6248  }
6249  }
6250  else
6251  {
6252  m_pUserData = pUserData;
6253  }
6254 }
6255 
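// Note (illustration): in "user data is string" mode the setter above deep-copies
// the incoming C string, so the caller's buffer may be freed or overwritten right
// after the call; in pointer mode the raw pointer is stored as-is and must stay
// valid for as long as it may be read back.
#if 0
static void ExampleNameAllocation(VmaAllocator hAllocator, VmaAllocation hAlloc)
{
    char name[32] = "Texture.Albedo";      // local, short-lived buffer
    hAlloc->SetUserData(hAllocator, name); // copied internally in string mode
    // 'name' may now go out of scope; GetUserData() returns the internal copy.
}
#endif
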
6256 void VmaAllocation_T::ChangeBlockAllocation(
6257  VmaAllocator hAllocator,
6258  VmaDeviceMemoryBlock* block,
6259  VkDeviceSize offset)
6260 {
6261  VMA_ASSERT(block != VMA_NULL);
6262  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6263 
6264  // Move mapping reference counter from old block to new block.
6265  if(block != m_BlockAllocation.m_Block)
6266  {
6267  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6268  if(IsPersistentMap())
6269  ++mapRefCount;
6270  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6271  block->Map(hAllocator, mapRefCount, VMA_NULL);
6272  }
6273 
6274  m_BlockAllocation.m_Block = block;
6275  m_BlockAllocation.m_Offset = offset;
6276 }
6277 
6278 VkDeviceSize VmaAllocation_T::GetOffset() const
6279 {
6280  switch(m_Type)
6281  {
6282  case ALLOCATION_TYPE_BLOCK:
6283  return m_BlockAllocation.m_Offset;
6284  case ALLOCATION_TYPE_DEDICATED:
6285  return 0;
6286  default:
6287  VMA_ASSERT(0);
6288  return 0;
6289  }
6290 }
6291 
6292 VkDeviceMemory VmaAllocation_T::GetMemory() const
6293 {
6294  switch(m_Type)
6295  {
6296  case ALLOCATION_TYPE_BLOCK:
6297  return m_BlockAllocation.m_Block->GetDeviceMemory();
6298  case ALLOCATION_TYPE_DEDICATED:
6299  return m_DedicatedAllocation.m_hMemory;
6300  default:
6301  VMA_ASSERT(0);
6302  return VK_NULL_HANDLE;
6303  }
6304 }
6305 
6306 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6307 {
6308  switch(m_Type)
6309  {
6310  case ALLOCATION_TYPE_BLOCK:
6311  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6312  case ALLOCATION_TYPE_DEDICATED:
6313  return m_DedicatedAllocation.m_MemoryTypeIndex;
6314  default:
6315  VMA_ASSERT(0);
6316  return UINT32_MAX;
6317  }
6318 }
6319 
6320 void* VmaAllocation_T::GetMappedData() const
6321 {
6322  switch(m_Type)
6323  {
6324  case ALLOCATION_TYPE_BLOCK:
6325  if(m_MapCount != 0)
6326  {
6327  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6328  VMA_ASSERT(pBlockData != VMA_NULL);
6329  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6330  }
6331  else
6332  {
6333  return VMA_NULL;
6334  }
6335  break;
6336  case ALLOCATION_TYPE_DEDICATED:
6337  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6338  return m_DedicatedAllocation.m_pMappedData;
6339  default:
6340  VMA_ASSERT(0);
6341  return VMA_NULL;
6342  }
6343 }
6344 
6345 bool VmaAllocation_T::CanBecomeLost() const
6346 {
6347  switch(m_Type)
6348  {
6349  case ALLOCATION_TYPE_BLOCK:
6350  return m_BlockAllocation.m_CanBecomeLost;
6351  case ALLOCATION_TYPE_DEDICATED:
6352  return false;
6353  default:
6354  VMA_ASSERT(0);
6355  return false;
6356  }
6357 }
6358 
6359 VmaPool VmaAllocation_T::GetPool() const
6360 {
6361  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6362  return m_BlockAllocation.m_hPool;
6363 }
6364 
6365 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6366 {
6367  VMA_ASSERT(CanBecomeLost());
6368 
6369  /*
6370  Warning: This is a carefully designed algorithm.
6371  Do not modify unless you really know what you're doing :)
6372  */
6373  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
6374  for(;;)
6375  {
6376  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6377  {
6378  VMA_ASSERT(0);
6379  return false;
6380  }
6381  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6382  {
6383  return false;
6384  }
6385  else // Last use time earlier than current time.
6386  {
6387  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6388  {
6389  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6390  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
6391  return true;
6392  }
6393  }
6394  }
6395 }
6396 
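// The loop above is a compare-exchange retry: take a snapshot of the atomic
// last-use frame index, decide on the snapshot, and commit VMA_FRAME_INDEX_LOST
// only if no other thread changed the value in the meantime. Equivalent sketch
// using std::atomic directly (illustration only; the real code goes through the
// Get/CompareExchangeLastUseFrameIndex wrappers):
#if 0
#include <atomic>
static bool TryMarkLost(std::atomic<uint32_t>& lastUseFrameIndex,
    uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t observed = lastUseFrameIndex.load();
    for(;;)
    {
        if(observed == VMA_FRAME_INDEX_LOST ||               // already lost
            observed + frameInUseCount >= currentFrameIndex) // possibly still in use
        {
            return false;
        }
        // On failure 'observed' is refreshed with the current value and we retry.
        if(lastUseFrameIndex.compare_exchange_weak(observed, VMA_FRAME_INDEX_LOST))
        {
            return true;
        }
    }
}
#endif
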
6397 #if VMA_STATS_STRING_ENABLED
6398 
6399 // Correspond to values of enum VmaSuballocationType.
6400 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6401  "FREE",
6402  "UNKNOWN",
6403  "BUFFER",
6404  "IMAGE_UNKNOWN",
6405  "IMAGE_LINEAR",
6406  "IMAGE_OPTIMAL",
6407 };
6408 
6409 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6410 {
6411  json.WriteString("Type");
6412  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6413 
6414  json.WriteString("Size");
6415  json.WriteNumber(m_Size);
6416 
6417  if(m_pUserData != VMA_NULL)
6418  {
6419  json.WriteString("UserData");
6420  if(IsUserDataString())
6421  {
6422  json.WriteString((const char*)m_pUserData);
6423  }
6424  else
6425  {
6426  json.BeginString();
6427  json.ContinueString_Pointer(m_pUserData);
6428  json.EndString();
6429  }
6430  }
6431 
6432  json.WriteString("CreationFrameIndex");
6433  json.WriteNumber(m_CreationFrameIndex);
6434 
6435  json.WriteString("LastUseFrameIndex");
6436  json.WriteNumber(GetLastUseFrameIndex());
6437 
6438  if(m_BufferImageUsage != 0)
6439  {
6440  json.WriteString("Usage");
6441  json.WriteNumber(m_BufferImageUsage);
6442  }
6443 }
6444 
6445 #endif
6446 
6447 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6448 {
6449  VMA_ASSERT(IsUserDataString());
6450  if(m_pUserData != VMA_NULL)
6451  {
6452  char* const oldStr = (char*)m_pUserData;
6453  const size_t oldStrLen = strlen(oldStr);
6454  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6455  m_pUserData = VMA_NULL;
6456  }
6457 }
6458 
6459 void VmaAllocation_T::BlockAllocMap()
6460 {
6461  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6462 
6463  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6464  {
6465  ++m_MapCount;
6466  }
6467  else
6468  {
6469  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6470  }
6471 }
6472 
6473 void VmaAllocation_T::BlockAllocUnmap()
6474 {
6475  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6476 
6477  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6478  {
6479  --m_MapCount;
6480  }
6481  else
6482  {
6483  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6484  }
6485 }
6486 
6487 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6488 {
6489  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6490 
6491  if(m_MapCount != 0)
6492  {
6493  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6494  {
6495  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6496  *ppData = m_DedicatedAllocation.m_pMappedData;
6497  ++m_MapCount;
6498  return VK_SUCCESS;
6499  }
6500  else
6501  {
6502  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6503  return VK_ERROR_MEMORY_MAP_FAILED;
6504  }
6505  }
6506  else
6507  {
6508  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6509  hAllocator->m_hDevice,
6510  m_DedicatedAllocation.m_hMemory,
6511  0, // offset
6512  VK_WHOLE_SIZE,
6513  0, // flags
6514  ppData);
6515  if(result == VK_SUCCESS)
6516  {
6517  m_DedicatedAllocation.m_pMappedData = *ppData;
6518  m_MapCount = 1;
6519  }
6520  return result;
6521  }
6522 }
6523 
6524 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6525 {
6526  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6527 
6528  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6529  {
6530  --m_MapCount;
6531  if(m_MapCount == 0)
6532  {
6533  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6534  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6535  hAllocator->m_hDevice,
6536  m_DedicatedAllocation.m_hMemory);
6537  }
6538  }
6539  else
6540  {
6541  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6542  }
6543 }
6544 
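// The low 7 bits of m_MapCount form a reference counter (hence the 0x7F cap in
// the Map functions above) and MAP_COUNT_FLAG_PERSISTENT_MAP occupies the high
// bit. Worked example (illustration), with the persistent flag worth 0x80:
//
//   created persistently mapped -> m_MapCount == 0x80 (user count 0, memory mapped)
//   vmaMapMemory                -> 0x81 (user count 1)
//   vmaUnmapMemory              -> 0x80 (memory stays mapped due to the flag)
//
// vkUnmapMemory is only reached when the whole m_MapCount drops to 0, which a
// persistently mapped allocation never does.
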
6545 #if VMA_STATS_STRING_ENABLED
6546 
6547 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6548 {
6549  json.BeginObject();
6550 
6551  json.WriteString("Blocks");
6552  json.WriteNumber(stat.blockCount);
6553 
6554  json.WriteString("Allocations");
6555  json.WriteNumber(stat.allocationCount);
6556 
6557  json.WriteString("UnusedRanges");
6558  json.WriteNumber(stat.unusedRangeCount);
6559 
6560  json.WriteString("UsedBytes");
6561  json.WriteNumber(stat.usedBytes);
6562 
6563  json.WriteString("UnusedBytes");
6564  json.WriteNumber(stat.unusedBytes);
6565 
6566  if(stat.allocationCount > 1)
6567  {
6568  json.WriteString("AllocationSize");
6569  json.BeginObject(true);
6570  json.WriteString("Min");
6571  json.WriteNumber(stat.allocationSizeMin);
6572  json.WriteString("Avg");
6573  json.WriteNumber(stat.allocationSizeAvg);
6574  json.WriteString("Max");
6575  json.WriteNumber(stat.allocationSizeMax);
6576  json.EndObject();
6577  }
6578 
6579  if(stat.unusedRangeCount > 1)
6580  {
6581  json.WriteString("UnusedRangeSize");
6582  json.BeginObject(true);
6583  json.WriteString("Min");
6584  json.WriteNumber(stat.unusedRangeSizeMin);
6585  json.WriteString("Avg");
6586  json.WriteNumber(stat.unusedRangeSizeAvg);
6587  json.WriteString("Max");
6588  json.WriteNumber(stat.unusedRangeSizeMax);
6589  json.EndObject();
6590  }
6591 
6592  json.EndObject();
6593 }
6594 
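// For reference, VmaPrintStatInfo produces an object of this shape (values
// invented for illustration; the size summaries appear only when there is more
// than one allocation / unused range to summarize):
//
//   {
//     "Blocks": 1,
//     "Allocations": 4,
//     "UnusedRanges": 2,
//     "UsedBytes": 196608,
//     "UnusedBytes": 65536,
//     "AllocationSize": {"Min": 16384, "Avg": 49152, "Max": 131072},
//     "UnusedRangeSize": {"Min": 4096, "Avg": 32768, "Max": 61440}
//   }
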
6595 #endif // #if VMA_STATS_STRING_ENABLED
6596 
6597 struct VmaSuballocationItemSizeLess
6598 {
6599  bool operator()(
6600  const VmaSuballocationList::iterator lhs,
6601  const VmaSuballocationList::iterator rhs) const
6602  {
6603  return lhs->size < rhs->size;
6604  }
6605  bool operator()(
6606  const VmaSuballocationList::iterator lhs,
6607  VkDeviceSize rhsSize) const
6608  {
6609  return lhs->size < rhsSize;
6610  }
6611 };
6612 
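// The second operator() overload makes the comparator usable for asymmetric
// searches: a vector of list iterators can be binary-searched against a plain
// VkDeviceSize key. Sketch of the equivalent call with std::lower_bound
// (illustration; the library uses its own VmaBinaryFindFirstNotLess):
#if 0
#include <algorithm>
static VmaSuballocationList::iterator* FindFirstFreeNotLess(
    VmaSuballocationList::iterator* pBegin,
    VmaSuballocationList::iterator* pEnd,
    VkDeviceSize minSize)
{
    // Uses the (iterator, VkDeviceSize) overload of VmaSuballocationItemSizeLess.
    return std::lower_bound(pBegin, pEnd, minSize, VmaSuballocationItemSizeLess());
}
#endif
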
6613 
6615 // class VmaBlockMetadata
6616 
6617 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
6618  m_Size(0),
6619  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
6620 {
6621 }
6622 
6623 #if VMA_STATS_STRING_ENABLED
6624 
6625 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6626  VkDeviceSize unusedBytes,
6627  size_t allocationCount,
6628  size_t unusedRangeCount) const
6629 {
6630  json.BeginObject();
6631 
6632  json.WriteString("TotalBytes");
6633  json.WriteNumber(GetSize());
6634 
6635  json.WriteString("UnusedBytes");
6636  json.WriteNumber(unusedBytes);
6637 
6638  json.WriteString("Allocations");
6639  json.WriteNumber((uint64_t)allocationCount);
6640 
6641  json.WriteString("UnusedRanges");
6642  json.WriteNumber((uint64_t)unusedRangeCount);
6643 
6644  json.WriteString("Suballocations");
6645  json.BeginArray();
6646 }
6647 
6648 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6649  VkDeviceSize offset,
6650  VmaAllocation hAllocation) const
6651 {
6652  json.BeginObject(true);
6653 
6654  json.WriteString("Offset");
6655  json.WriteNumber(offset);
6656 
6657  hAllocation->PrintParameters(json);
6658 
6659  json.EndObject();
6660 }
6661 
6662 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6663  VkDeviceSize offset,
6664  VkDeviceSize size) const
6665 {
6666  json.BeginObject(true);
6667 
6668  json.WriteString("Offset");
6669  json.WriteNumber(offset);
6670 
6671  json.WriteString("Type");
6672  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6673 
6674  json.WriteString("Size");
6675  json.WriteNumber(size);
6676 
6677  json.EndObject();
6678 }
6679 
6680 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6681 {
6682  json.EndArray();
6683  json.EndObject();
6684 }
6685 
6686 #endif // #if VMA_STATS_STRING_ENABLED
6687 
6689 // class VmaBlockMetadata_Generic
6690 
6691 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6692  VmaBlockMetadata(hAllocator),
6693  m_FreeCount(0),
6694  m_SumFreeSize(0),
6695  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6696  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6697 {
6698 }
6699 
6700 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6701 {
6702 }
6703 
6704 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6705 {
6706  VmaBlockMetadata::Init(size);
6707 
6708  m_FreeCount = 1;
6709  m_SumFreeSize = size;
6710 
6711  VmaSuballocation suballoc = {};
6712  suballoc.offset = 0;
6713  suballoc.size = size;
6714  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6715  suballoc.hAllocation = VK_NULL_HANDLE;
6716 
6717  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6718  m_Suballocations.push_back(suballoc);
6719  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6720  --suballocItem;
6721  m_FreeSuballocationsBySize.push_back(suballocItem);
6722 }
6723 
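// State right after Init() (illustration): the whole block is one FREE node.
//
//   offset 0                                          size
//   |<------------------ FREE ------------------------>|
//
// m_Suballocations holds that single node and m_FreeSuballocationsBySize holds
// an iterator to it - which is why the assert above requires size to exceed
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER.
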
6724 bool VmaBlockMetadata_Generic::Validate() const
6725 {
6726  VMA_VALIDATE(!m_Suballocations.empty());
6727 
6728  // Expected offset of new suballocation as calculated from previous ones.
6729  VkDeviceSize calculatedOffset = 0;
6730  // Expected number of free suballocations as calculated from traversing their list.
6731  uint32_t calculatedFreeCount = 0;
6732  // Expected sum size of free suballocations as calculated from traversing their list.
6733  VkDeviceSize calculatedSumFreeSize = 0;
6734  // Expected number of free suballocations that should be registered in
6735  // m_FreeSuballocationsBySize calculated from traversing their list.
6736  size_t freeSuballocationsToRegister = 0;
6737  // True if previous visited suballocation was free.
6738  bool prevFree = false;
6739 
6740  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6741  suballocItem != m_Suballocations.cend();
6742  ++suballocItem)
6743  {
6744  const VmaSuballocation& subAlloc = *suballocItem;
6745 
6746  // Actual offset of this suballocation doesn't match expected one.
6747  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6748 
6749  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6750  // Two adjacent free suballocations are invalid. They should be merged.
6751  VMA_VALIDATE(!prevFree || !currFree);
6752 
6753  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
6754 
6755  if(currFree)
6756  {
6757  calculatedSumFreeSize += subAlloc.size;
6758  ++calculatedFreeCount;
6759  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6760  {
6761  ++freeSuballocationsToRegister;
6762  }
6763 
6764  // Margin required between allocations - every free space must be at least that large.
6765  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
6766  }
6767  else
6768  {
6769  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
6770  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
6771 
6772  // Margin required between allocations - previous allocation must be free.
6773  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
6774  }
6775 
6776  calculatedOffset += subAlloc.size;
6777  prevFree = currFree;
6778  }
6779 
6780  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6781  // match expected one.
6782  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6783 
6784  VkDeviceSize lastSize = 0;
6785  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6786  {
6787  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6788 
6789  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6790  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6791  // They must be sorted by size ascending.
6792  VMA_VALIDATE(suballocItem->size >= lastSize);
6793 
6794  lastSize = suballocItem->size;
6795  }
6796 
6797  // Check if totals match calculated values.
6798  VMA_VALIDATE(ValidateFreeSuballocationList());
6799  VMA_VALIDATE(calculatedOffset == GetSize());
6800  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6801  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6802 
6803  return true;
6804 }
6805 
6806 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6807 {
6808  if(!m_FreeSuballocationsBySize.empty())
6809  {
6810  return m_FreeSuballocationsBySize.back()->size;
6811  }
6812  else
6813  {
6814  return 0;
6815  }
6816 }
6817 
6818 bool VmaBlockMetadata_Generic::IsEmpty() const
6819 {
6820  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6821 }
6822 
6823 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6824 {
6825  outInfo.blockCount = 1;
6826 
6827  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6828  outInfo.allocationCount = rangeCount - m_FreeCount;
6829  outInfo.unusedRangeCount = m_FreeCount;
6830 
6831  outInfo.unusedBytes = m_SumFreeSize;
6832  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6833 
6834  outInfo.allocationSizeMin = UINT64_MAX;
6835  outInfo.allocationSizeMax = 0;
6836  outInfo.unusedRangeSizeMin = UINT64_MAX;
6837  outInfo.unusedRangeSizeMax = 0;
6838 
6839  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6840  suballocItem != m_Suballocations.cend();
6841  ++suballocItem)
6842  {
6843  const VmaSuballocation& suballoc = *suballocItem;
6844  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6845  {
6846  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6847  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6848  }
6849  else
6850  {
6851  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6852  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6853  }
6854  }
6855 }
6856 
6857 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6858 {
6859  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6860 
6861  inoutStats.size += GetSize();
6862  inoutStats.unusedSize += m_SumFreeSize;
6863  inoutStats.allocationCount += rangeCount - m_FreeCount;
6864  inoutStats.unusedRangeCount += m_FreeCount;
6865  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6866 }
6867 
6868 #if VMA_STATS_STRING_ENABLED
6869 
6870 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6871 {
6872  PrintDetailedMap_Begin(json,
6873  m_SumFreeSize, // unusedBytes
6874  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6875  m_FreeCount); // unusedRangeCount
6876 
6877  size_t i = 0;
6878  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6879  suballocItem != m_Suballocations.cend();
6880  ++suballocItem, ++i)
6881  {
6882  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6883  {
6884  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6885  }
6886  else
6887  {
6888  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6889  }
6890  }
6891 
6892  PrintDetailedMap_End(json);
6893 }
6894 
6895 #endif // #if VMA_STATS_STRING_ENABLED
6896 
6897 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6898  uint32_t currentFrameIndex,
6899  uint32_t frameInUseCount,
6900  VkDeviceSize bufferImageGranularity,
6901  VkDeviceSize allocSize,
6902  VkDeviceSize allocAlignment,
6903  bool upperAddress,
6904  VmaSuballocationType allocType,
6905  bool canMakeOtherLost,
6906  uint32_t strategy,
6907  VmaAllocationRequest* pAllocationRequest)
6908 {
6909  VMA_ASSERT(allocSize > 0);
6910  VMA_ASSERT(!upperAddress);
6911  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6912  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6913  VMA_HEAVY_ASSERT(Validate());
6914 
6915  // There is not enough total free space in this block to fulfill the request: Early return.
6916  if(canMakeOtherLost == false &&
6917  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6918  {
6919  return false;
6920  }
6921 
6922  // Efficiently search m_FreeSuballocationsBySize for a suitable free range.
6923  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6924  if(freeSuballocCount > 0)
6925  {
6926  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
6927  {
6928  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
6929  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6930  m_FreeSuballocationsBySize.data(),
6931  m_FreeSuballocationsBySize.data() + freeSuballocCount,
6932  allocSize + 2 * VMA_DEBUG_MARGIN,
6933  VmaSuballocationItemSizeLess());
6934  size_t index = it - m_FreeSuballocationsBySize.data();
6935  for(; index < freeSuballocCount; ++index)
6936  {
6937  if(CheckAllocation(
6938  currentFrameIndex,
6939  frameInUseCount,
6940  bufferImageGranularity,
6941  allocSize,
6942  allocAlignment,
6943  allocType,
6944  m_FreeSuballocationsBySize[index],
6945  false, // canMakeOtherLost
6946  &pAllocationRequest->offset,
6947  &pAllocationRequest->itemsToMakeLostCount,
6948  &pAllocationRequest->sumFreeSize,
6949  &pAllocationRequest->sumItemSize))
6950  {
6951  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
6952  return true;
6953  }
6954  }
6955  }
6956  else // WORST_FIT, FIRST_FIT
6957  {
6958  // Search starting from the biggest suballocations.
6959  for(size_t index = freeSuballocCount; index--; )
6960  {
6961  if(CheckAllocation(
6962  currentFrameIndex,
6963  frameInUseCount,
6964  bufferImageGranularity,
6965  allocSize,
6966  allocAlignment,
6967  allocType,
6968  m_FreeSuballocationsBySize[index],
6969  false, // canMakeOtherLost
6970  &pAllocationRequest->offset,
6971  &pAllocationRequest->itemsToMakeLostCount,
6972  &pAllocationRequest->sumFreeSize,
6973  &pAllocationRequest->sumItemSize))
6974  {
6975  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
6976  return true;
6977  }
6978  }
6979  }
6980  }
6981 
6982  if(canMakeOtherLost)
6983  {
6984  // Brute-force algorithm. TODO: Come up with something better.
6985 
6986  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
6987  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
6988 
6989  VmaAllocationRequest tmpAllocRequest = {};
6990  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
6991  suballocIt != m_Suballocations.end();
6992  ++suballocIt)
6993  {
6994  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
6995  suballocIt->hAllocation->CanBecomeLost())
6996  {
6997  if(CheckAllocation(
6998  currentFrameIndex,
6999  frameInUseCount,
7000  bufferImageGranularity,
7001  allocSize,
7002  allocAlignment,
7003  allocType,
7004  suballocIt,
7005  canMakeOtherLost,
7006  &tmpAllocRequest.offset,
7007  &tmpAllocRequest.itemsToMakeLostCount,
7008  &tmpAllocRequest.sumFreeSize,
7009  &tmpAllocRequest.sumItemSize))
7010  {
7011  tmpAllocRequest.item = suballocIt;
7012 
7013  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7014  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7015  {
7016  *pAllocationRequest = tmpAllocRequest;
7017  }
7018  }
7019  }
7020  }
7021 
7022  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7023  {
7024  return true;
7025  }
7026  }
7027 
7028  return false;
7029 }
7030 
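// Strategy recap for CreateAllocationRequest() (as implemented above):
//  - BEST_FIT: binary-search m_FreeSuballocationsBySize for the smallest free
//    range >= allocSize + 2 * VMA_DEBUG_MARGIN, then walk upward until
//    CheckAllocation() accepts one (alignment or granularity can reject a range
//    that is nominally large enough).
//  - WORST_FIT / FIRST_FIT: iterate from the biggest registered free range down.
//  - canMakeOtherLost: brute-force scan over all suballocations, keeping the
//    candidate with the lowest CalcCost() (or the first acceptable one for
//    FIRST_FIT).
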
7031 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7032  uint32_t currentFrameIndex,
7033  uint32_t frameInUseCount,
7034  VmaAllocationRequest* pAllocationRequest)
7035 {
7036  while(pAllocationRequest->itemsToMakeLostCount > 0)
7037  {
7038  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7039  {
7040  ++pAllocationRequest->item;
7041  }
7042  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7043  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7044  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7045  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7046  {
7047  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7048  --pAllocationRequest->itemsToMakeLostCount;
7049  }
7050  else
7051  {
7052  return false;
7053  }
7054  }
7055 
7056  VMA_HEAVY_ASSERT(Validate());
7057  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7058  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7059 
7060  return true;
7061 }
7062 
7063 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7064 {
7065  uint32_t lostAllocationCount = 0;
7066  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7067  it != m_Suballocations.end();
7068  ++it)
7069  {
7070  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7071  it->hAllocation->CanBecomeLost() &&
7072  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7073  {
7074  it = FreeSuballocation(it);
7075  ++lostAllocationCount;
7076  }
7077  }
7078  return lostAllocationCount;
7079 }
7080 
7081 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7082 {
7083  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7084  it != m_Suballocations.end();
7085  ++it)
7086  {
7087  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7088  {
7089  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7090  {
7091  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7092  return VK_ERROR_VALIDATION_FAILED_EXT;
7093  }
7094  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7095  {
7096  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7097  return VK_ERROR_VALIDATION_FAILED_EXT;
7098  }
7099  }
7100  }
7101 
7102  return VK_SUCCESS;
7103 }
7104 
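// With VMA_DEBUG_MARGIN > 0 and corruption detection enabled, each used
// suballocation is bracketed by margin bytes filled with a magic number, which
// is what VmaValidateMagicValue() verifies above. Layout sketch (illustration):
//
//   ... free | MAGIC | allocation | MAGIC | free ...
//            ^ it->offset - VMA_DEBUG_MARGIN
//                                 ^ it->offset + it->size
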
7105 void VmaBlockMetadata_Generic::Alloc(
7106  const VmaAllocationRequest& request,
7107  VmaSuballocationType type,
7108  VkDeviceSize allocSize,
7109  bool upperAddress,
7110  VmaAllocation hAllocation)
7111 {
7112  VMA_ASSERT(!upperAddress);
7113  VMA_ASSERT(request.item != m_Suballocations.end());
7114  VmaSuballocation& suballoc = *request.item;
7115  // Given suballocation is a free block.
7116  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7117  // Given offset is inside this suballocation.
7118  VMA_ASSERT(request.offset >= suballoc.offset);
7119  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7120  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7121  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7122 
7123  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7124  // it to become used.
7125  UnregisterFreeSuballocation(request.item);
7126 
7127  suballoc.offset = request.offset;
7128  suballoc.size = allocSize;
7129  suballoc.type = type;
7130  suballoc.hAllocation = hAllocation;
7131 
7132  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7133  if(paddingEnd)
7134  {
7135  VmaSuballocation paddingSuballoc = {};
7136  paddingSuballoc.offset = request.offset + allocSize;
7137  paddingSuballoc.size = paddingEnd;
7138  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7139  VmaSuballocationList::iterator next = request.item;
7140  ++next;
7141  const VmaSuballocationList::iterator paddingEndItem =
7142  m_Suballocations.insert(next, paddingSuballoc);
7143  RegisterFreeSuballocation(paddingEndItem);
7144  }
7145 
7146  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7147  if(paddingBegin)
7148  {
7149  VmaSuballocation paddingSuballoc = {};
7150  paddingSuballoc.offset = request.offset - paddingBegin;
7151  paddingSuballoc.size = paddingBegin;
7152  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7153  const VmaSuballocationList::iterator paddingBeginItem =
7154  m_Suballocations.insert(request.item, paddingSuballoc);
7155  RegisterFreeSuballocation(paddingBeginItem);
7156  }
7157 
7158  // Update totals.
7159  m_FreeCount = m_FreeCount - 1;
7160  if(paddingBegin > 0)
7161  {
7162  ++m_FreeCount;
7163  }
7164  if(paddingEnd > 0)
7165  {
7166  ++m_FreeCount;
7167  }
7168  m_SumFreeSize -= allocSize;
7169 }
7170 
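// Alloc() carves the chosen free suballocation into up to three pieces.
// Worked example (numbers invented): free range at offset 1000, size 600; the
// request lands at offset 1024 (after alignment) with allocSize 512:
//
//   before: [ FREE 1000..1600 ]
//   after:  [ FREE 1000..1024 ][ USED 1024..1536 ][ FREE 1536..1600 ]
//            paddingBegin = 24   allocSize = 512    paddingEnd = 64
//
// m_FreeCount loses the original free node and regains one per non-empty
// padding, and m_SumFreeSize drops by exactly allocSize.
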
7171 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7172 {
7173  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7174  suballocItem != m_Suballocations.end();
7175  ++suballocItem)
7176  {
7177  VmaSuballocation& suballoc = *suballocItem;
7178  if(suballoc.hAllocation == allocation)
7179  {
7180  FreeSuballocation(suballocItem);
7181  VMA_HEAVY_ASSERT(Validate());
7182  return;
7183  }
7184  }
7185  VMA_ASSERT(0 && "Not found!");
7186 }
7187 
7188 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7189 {
7190  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7191  suballocItem != m_Suballocations.end();
7192  ++suballocItem)
7193  {
7194  VmaSuballocation& suballoc = *suballocItem;
7195  if(suballoc.offset == offset)
7196  {
7197  FreeSuballocation(suballocItem);
7198  return;
7199  }
7200  }
7201  VMA_ASSERT(0 && "Not found!");
7202 }
7203 
7204 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7205 {
7206  VkDeviceSize lastSize = 0;
7207  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7208  {
7209  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7210 
7211  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7212  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7213  VMA_VALIDATE(it->size >= lastSize);
7214  lastSize = it->size;
7215  }
7216  return true;
7217 }
7218 
7219 bool VmaBlockMetadata_Generic::CheckAllocation(
7220  uint32_t currentFrameIndex,
7221  uint32_t frameInUseCount,
7222  VkDeviceSize bufferImageGranularity,
7223  VkDeviceSize allocSize,
7224  VkDeviceSize allocAlignment,
7225  VmaSuballocationType allocType,
7226  VmaSuballocationList::const_iterator suballocItem,
7227  bool canMakeOtherLost,
7228  VkDeviceSize* pOffset,
7229  size_t* itemsToMakeLostCount,
7230  VkDeviceSize* pSumFreeSize,
7231  VkDeviceSize* pSumItemSize) const
7232 {
7233  VMA_ASSERT(allocSize > 0);
7234  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7235  VMA_ASSERT(suballocItem != m_Suballocations.cend());
7236  VMA_ASSERT(pOffset != VMA_NULL);
7237 
7238  *itemsToMakeLostCount = 0;
7239  *pSumFreeSize = 0;
7240  *pSumItemSize = 0;
7241 
7242  if(canMakeOtherLost)
7243  {
7244  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7245  {
7246  *pSumFreeSize = suballocItem->size;
7247  }
7248  else
7249  {
7250  if(suballocItem->hAllocation->CanBecomeLost() &&
7251  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7252  {
7253  ++*itemsToMakeLostCount;
7254  *pSumItemSize = suballocItem->size;
7255  }
7256  else
7257  {
7258  return false;
7259  }
7260  }
7261 
7262  // Remaining size is too small for this request: Early return.
7263  if(GetSize() - suballocItem->offset < allocSize)
7264  {
7265  return false;
7266  }
7267 
7268  // Start from offset equal to beginning of this suballocation.
7269  *pOffset = suballocItem->offset;
7270 
7271  // Apply VMA_DEBUG_MARGIN at the beginning.
7272  if(VMA_DEBUG_MARGIN > 0)
7273  {
7274  *pOffset += VMA_DEBUG_MARGIN;
7275  }
7276 
7277  // Apply alignment.
7278  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7279 
7280  // Check previous suballocations for BufferImageGranularity conflicts.
7281  // Increase alignment if necessary.
7282  if(bufferImageGranularity > 1)
7283  {
7284  bool bufferImageGranularityConflict = false;
7285  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7286  while(prevSuballocItem != m_Suballocations.cbegin())
7287  {
7288  --prevSuballocItem;
7289  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7290  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7291  {
7292  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7293  {
7294  bufferImageGranularityConflict = true;
7295  break;
7296  }
7297  }
7298  else
7299  // Already on previous page.
7300  break;
7301  }
7302  if(bufferImageGranularityConflict)
7303  {
7304  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7305  }
7306  }
7307 
7308  // Now that we have final *pOffset, check if we are past suballocItem.
7309  // If yes, return false - this function should be called with another suballocItem as the starting point.
7310  if(*pOffset >= suballocItem->offset + suballocItem->size)
7311  {
7312  return false;
7313  }
7314 
7315  // Calculate padding at the beginning based on current offset.
7316  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
7317 
7318  // Calculate required margin at the end.
7319  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7320 
7321  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
7322  // Another early return check.
7323  if(suballocItem->offset + totalSize > GetSize())
7324  {
7325  return false;
7326  }
7327 
7328  // Advance lastSuballocItem until desired size is reached.
7329  // Update itemsToMakeLostCount.
7330  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
7331  if(totalSize > suballocItem->size)
7332  {
7333  VkDeviceSize remainingSize = totalSize - suballocItem->size;
7334  while(remainingSize > 0)
7335  {
7336  ++lastSuballocItem;
7337  if(lastSuballocItem == m_Suballocations.cend())
7338  {
7339  return false;
7340  }
7341  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7342  {
7343  *pSumFreeSize += lastSuballocItem->size;
7344  }
7345  else
7346  {
7347  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
7348  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
7349  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7350  {
7351  ++*itemsToMakeLostCount;
7352  *pSumItemSize += lastSuballocItem->size;
7353  }
7354  else
7355  {
7356  return false;
7357  }
7358  }
7359  remainingSize = (lastSuballocItem->size < remainingSize) ?
7360  remainingSize - lastSuballocItem->size : 0;
7361  }
7362  }
7363 
7364  // Check next suballocations for BufferImageGranularity conflicts.
7365  // If conflict exists, we must mark more allocations lost or fail.
7366  if(bufferImageGranularity > 1)
7367  {
7368  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
7369  ++nextSuballocItem;
7370  while(nextSuballocItem != m_Suballocations.cend())
7371  {
7372  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7373  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7374  {
7375  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7376  {
7377  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
7378  if(nextSuballoc.hAllocation->CanBecomeLost() &&
7379  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7380  {
7381  ++*itemsToMakeLostCount;
7382  }
7383  else
7384  {
7385  return false;
7386  }
7387  }
7388  }
7389  else
7390  {
7391  // Already on next page.
7392  break;
7393  }
7394  ++nextSuballocItem;
7395  }
7396  }
7397  }
7398  else
7399  {
7400  const VmaSuballocation& suballoc = *suballocItem;
7401  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7402 
7403  *pSumFreeSize = suballoc.size;
7404 
7405  // Size of this suballocation is too small for this request: Early return.
7406  if(suballoc.size < allocSize)
7407  {
7408  return false;
7409  }
7410 
7411  // Start from offset equal to beginning of this suballocation.
7412  *pOffset = suballoc.offset;
7413 
7414  // Apply VMA_DEBUG_MARGIN at the beginning.
7415  if(VMA_DEBUG_MARGIN > 0)
7416  {
7417  *pOffset += VMA_DEBUG_MARGIN;
7418  }
7419 
7420  // Apply alignment.
7421  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7422 
7423  // Check previous suballocations for BufferImageGranularity conflicts.
7424  // Make bigger alignment if necessary.
7425  if(bufferImageGranularity > 1)
7426  {
7427  bool bufferImageGranularityConflict = false;
7428  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7429  while(prevSuballocItem != m_Suballocations.cbegin())
7430  {
7431  --prevSuballocItem;
7432  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7433  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7434  {
7435  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7436  {
7437  bufferImageGranularityConflict = true;
7438  break;
7439  }
7440  }
7441  else
7442  // Already on previous page.
7443  break;
7444  }
7445  if(bufferImageGranularityConflict)
7446  {
7447  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7448  }
7449  }
7450 
7451  // Calculate padding at the beginning based on current offset.
7452  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
7453 
7454  // Calculate required margin at the end.
7455  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7456 
7457  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
7458  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
7459  {
7460  return false;
7461  }
7462 
7463  // Check next suballocations for BufferImageGranularity conflicts.
7464  // If conflict exists, allocation cannot be made here.
7465  if(bufferImageGranularity > 1)
7466  {
7467  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7468  ++nextSuballocItem;
7469  while(nextSuballocItem != m_Suballocations.cend())
7470  {
7471  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7472  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7473  {
7474  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7475  {
7476  return false;
7477  }
7478  }
7479  else
7480  {
7481  // Already on next page.
7482  break;
7483  }
7484  ++nextSuballocItem;
7485  }
7486  }
7487  }
7488 
7489  // All tests passed: Success. pOffset is already filled.
7490  return true;
7491 }
7492 
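// Worked example for the offset computation above (numbers invented):
// suballoc.offset = 1000, suballoc.size = 4096, VMA_DEBUG_MARGIN = 16,
// allocAlignment = 256, bufferImageGranularity = 1024, allocSize = 2048.
//
//   *pOffset = 1000 + 16          = 1016  // margin at the beginning
//   *pOffset = AlignUp(1016, 256) = 1024  // requested alignment
//   // if a neighboring suballocation of conflicting type shares the same
//   // 1024-byte page, align further: AlignUp(1024, 1024) = 1024 (no change here)
//   paddingBegin = 1024 - 1000 = 24
//   fits if 24 + 2048 + 16 <= 4096  ->  2088 <= 4096, so the request succeeds.
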
7493 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7494 {
7495  VMA_ASSERT(item != m_Suballocations.end());
7496  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7497 
7498  VmaSuballocationList::iterator nextItem = item;
7499  ++nextItem;
7500  VMA_ASSERT(nextItem != m_Suballocations.end());
7501  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7502 
7503  item->size += nextItem->size;
7504  --m_FreeCount;
7505  m_Suballocations.erase(nextItem);
7506 }
7507 
7508 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7509 {
7510  // Change this suballocation to be marked as free.
7511  VmaSuballocation& suballoc = *suballocItem;
7512  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7513  suballoc.hAllocation = VK_NULL_HANDLE;
7514 
7515  // Update totals.
7516  ++m_FreeCount;
7517  m_SumFreeSize += suballoc.size;
7518 
7519  // Merge with previous and/or next suballocation if it's also free.
7520  bool mergeWithNext = false;
7521  bool mergeWithPrev = false;
7522 
7523  VmaSuballocationList::iterator nextItem = suballocItem;
7524  ++nextItem;
7525  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7526  {
7527  mergeWithNext = true;
7528  }
7529 
7530  VmaSuballocationList::iterator prevItem = suballocItem;
7531  if(suballocItem != m_Suballocations.begin())
7532  {
7533  --prevItem;
7534  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7535  {
7536  mergeWithPrev = true;
7537  }
7538  }
7539 
7540  if(mergeWithNext)
7541  {
7542  UnregisterFreeSuballocation(nextItem);
7543  MergeFreeWithNext(suballocItem);
7544  }
7545 
7546  if(mergeWithPrev)
7547  {
7548  UnregisterFreeSuballocation(prevItem);
7549  MergeFreeWithNext(prevItem);
7550  RegisterFreeSuballocation(prevItem);
7551  return prevItem;
7552  }
7553  else
7554  {
7555  RegisterFreeSuballocation(suballocItem);
7556  return suballocItem;
7557  }
7558 }
7559 
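// FreeSuballocation() restores the free-list invariant checked by Validate():
// no two adjacent FREE nodes. Worked example (illustration):
//
//   before Free(B):  [ FREE A ][ USED B ][ FREE C ]
//   mark B free:     [ FREE A ][ FREE B ][ FREE C ]   // invalid, must merge
//   merge B + C:     [ FREE A ][ FREE B+C ]
//   merge A + (B+C): [ FREE A+B+C ]                   // prevItem is returned
//
// Each merged neighbor is first unregistered from m_FreeSuballocationsBySize so
// the size-sorted vector never holds a dangling list iterator.
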
7560 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7561 {
7562  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7563  VMA_ASSERT(item->size > 0);
7564 
7565  // You may want to enable this validation at the beginning or at the end of
7566  // this function, depending on what you want to check.
7567  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7568 
7569  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7570  {
7571  if(m_FreeSuballocationsBySize.empty())
7572  {
7573  m_FreeSuballocationsBySize.push_back(item);
7574  }
7575  else
7576  {
7577  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7578  }
7579  }
7580 
7581  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7582 }
7583 
7584 
7585 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7586 {
7587  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7588  VMA_ASSERT(item->size > 0);
7589 
7590  // You may want to enable this validation at the beginning or at the end of
7591  // this function, depending on what you want to check.
7592  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7593 
7594  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7595  {
7596  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7597  m_FreeSuballocationsBySize.data(),
7598  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7599  item,
7600  VmaSuballocationItemSizeLess());
7601  for(size_t index = it - m_FreeSuballocationsBySize.data();
7602  index < m_FreeSuballocationsBySize.size();
7603  ++index)
7604  {
7605  if(m_FreeSuballocationsBySize[index] == item)
7606  {
7607  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7608  return;
7609  }
7610  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7611  }
7612  VMA_ASSERT(0 && "Not found.");
7613  }
7614 
7615  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7616 }
7617 
7619 // class VmaBlockMetadata_Linear
7620 
7621 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
7622  VmaBlockMetadata(hAllocator),
7623  m_SumFreeSize(0),
7624  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7625  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7626  m_1stVectorIndex(0),
7627  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7628  m_1stNullItemsBeginCount(0),
7629  m_1stNullItemsMiddleCount(0),
7630  m_2ndNullItemsCount(0)
7631 {
7632 }
7633 
7634 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
7635 {
7636 }
7637 
7638 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7639 {
7640  VmaBlockMetadata::Init(size);
7641  m_SumFreeSize = size;
7642 }
7643 
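// VmaBlockMetadata_Linear tracks the block with two suballocation vectors whose
// roles can swap (m_1stVectorIndex selects which one is currently "1st"). The
// three m_2ndVectorMode states validated below lay out like this (illustration):
//
//   SECOND_VECTOR_EMPTY:        |== 1st ==|........ free ...............|
//   SECOND_VECTOR_RING_BUFFER:  |== 2nd ==|.. free ..|====== 1st ======|
//                               (2nd starts at low offsets after 1st wraps)
//   SECOND_VECTOR_DOUBLE_STACK: |== 1st ==>.... free ....<== 2nd (upper)|
//
// m_1stNullItemsBeginCount / m_1stNullItemsMiddleCount / m_2ndNullItemsCount
// count freed ("null") entries that are kept in the vectors until cleanup.
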
7644 bool VmaBlockMetadata_Linear::Validate() const
7645 {
7646  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7647  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7648 
7649  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7650  VMA_VALIDATE(!suballocations1st.empty() ||
7651  suballocations2nd.empty() ||
7652  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7653 
7654  if(!suballocations1st.empty())
7655  {
7656  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
7657  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
7658  // A null item at the end should have been removed by pop_back().
7659  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
7660  }
7661  if(!suballocations2nd.empty())
7662  {
7663  // A null item at the end should have been removed by pop_back().
7664  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
7665  }
7666 
7667  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7668  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7669 
7670  VkDeviceSize sumUsedSize = 0;
7671  const size_t suballoc1stCount = suballocations1st.size();
7672  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7673 
7674  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7675  {
7676  const size_t suballoc2ndCount = suballocations2nd.size();
7677  size_t nullItem2ndCount = 0;
7678  for(size_t i = 0; i < suballoc2ndCount; ++i)
7679  {
7680  const VmaSuballocation& suballoc = suballocations2nd[i];
7681  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7682 
7683  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7684  VMA_VALIDATE(suballoc.offset >= offset);
7685 
7686  if(!currFree)
7687  {
7688  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7689  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7690  sumUsedSize += suballoc.size;
7691  }
7692  else
7693  {
7694  ++nullItem2ndCount;
7695  }
7696 
7697  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7698  }
7699 
7700  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7701  }
7702 
7703  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7704  {
7705  const VmaSuballocation& suballoc = suballocations1st[i];
7706  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7707  suballoc.hAllocation == VK_NULL_HANDLE);
7708  }
7709 
7710  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7711 
7712  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7713  {
7714  const VmaSuballocation& suballoc = suballocations1st[i];
7715  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7716 
7717  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7718  VMA_VALIDATE(suballoc.offset >= offset);
7719  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7720 
7721  if(!currFree)
7722  {
7723  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7724  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7725  sumUsedSize += suballoc.size;
7726  }
7727  else
7728  {
7729  ++nullItem1stCount;
7730  }
7731 
7732  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7733  }
7734  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7735 
7736  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7737  {
7738  const size_t suballoc2ndCount = suballocations2nd.size();
7739  size_t nullItem2ndCount = 0;
7740  for(size_t i = suballoc2ndCount; i--; )
7741  {
7742  const VmaSuballocation& suballoc = suballocations2nd[i];
7743  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7744 
7745  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7746  VMA_VALIDATE(suballoc.offset >= offset);
7747 
7748  if(!currFree)
7749  {
7750  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7751  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7752  sumUsedSize += suballoc.size;
7753  }
7754  else
7755  {
7756  ++nullItem2ndCount;
7757  }
7758 
7759  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7760  }
7761 
7762  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7763  }
7764 
7765  VMA_VALIDATE(offset <= GetSize());
7766  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7767 
7768  return true;
7769 }
7770 
7771 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7772 {
7773  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7774  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7775 }
7776 
7777 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7778 {
7779  const VkDeviceSize size = GetSize();
7780 
7781  /*
7782  We don't consider gaps inside allocation vectors with freed allocations because
7783  they are not suitable for reuse by the linear allocator. We consider only space that
7784  is available for new allocations.
7785  */
7786  if(IsEmpty())
7787  {
7788  return size;
7789  }
7790 
7791  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7792 
7793  switch(m_2ndVectorMode)
7794  {
7795  case SECOND_VECTOR_EMPTY:
7796  /*
7797  Available space is after the end of 1st, as well as before the beginning of 1st
7798  (which would make it a ring buffer).
7799  */
7800  {
7801  const size_t suballocations1stCount = suballocations1st.size();
7802  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
7803  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
7804  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
7805  return VMA_MAX(
7806  firstSuballoc.offset,
7807  size - (lastSuballoc.offset + lastSuballoc.size));
7808  }
7809  break;
7810 
7811  case SECOND_VECTOR_RING_BUFFER:
7812  /*
7813  Available space is only between end of 2nd and beginning of 1st.
7814  */
7815  {
7816  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7817  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
7818  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
7819  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
7820  }
7821  break;
7822 
7823  case SECOND_VECTOR_DOUBLE_STACK:
7824  /*
7825  Available space is only between end of 1st and top of 2nd.
7826  */
7827  {
7828  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7829  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
7830  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
7831  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
7832  }
7833  break;
7834 
7835  default:
7836  VMA_ASSERT(0);
7837  return 0;
7838  }
7839 }
7840 
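// Worked example (numbers invented) for the SECOND_VECTOR_EMPTY case above:
// size = 1048576, the first used suballocation starts at offset 262144 and the
// last one ends at 917504. The result is
//   max(262144, 1048576 - 917504) = max(262144, 131072) = 262144,
// i.e. the space before the first allocation; gaps between live allocations are
// deliberately ignored because a linear allocator cannot reuse them.
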
7841 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7842 {
7843  const VkDeviceSize size = GetSize();
7844  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7845  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7846  const size_t suballoc1stCount = suballocations1st.size();
7847  const size_t suballoc2ndCount = suballocations2nd.size();
7848 
7849  outInfo.blockCount = 1;
7850  outInfo.allocationCount = (uint32_t)GetAllocationCount();
7851  outInfo.unusedRangeCount = 0;
7852  outInfo.usedBytes = 0;
7853  outInfo.allocationSizeMin = UINT64_MAX;
7854  outInfo.allocationSizeMax = 0;
7855  outInfo.unusedRangeSizeMin = UINT64_MAX;
7856  outInfo.unusedRangeSizeMax = 0;
7857 
7858  VkDeviceSize lastOffset = 0;
7859 
7860  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7861  {
7862  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7863  size_t nextAlloc2ndIndex = 0;
7864  while(lastOffset < freeSpace2ndTo1stEnd)
7865  {
7866  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7867  while(nextAlloc2ndIndex < suballoc2ndCount &&
7868  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7869  {
7870  ++nextAlloc2ndIndex;
7871  }
7872 
7873  // Found non-null allocation.
7874  if(nextAlloc2ndIndex < suballoc2ndCount)
7875  {
7876  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7877 
7878  // 1. Process free space before this allocation.
7879  if(lastOffset < suballoc.offset)
7880  {
7881  // There is free space from lastOffset to suballoc.offset.
7882  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7883  ++outInfo.unusedRangeCount;
7884  outInfo.unusedBytes += unusedRangeSize;
7885  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7886  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7887  }
7888 
7889  // 2. Process this allocation.
7890  // There is allocation with suballoc.offset, suballoc.size.
7891  outInfo.usedBytes += suballoc.size;
7892  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7893  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7894 
7895  // 3. Prepare for next iteration.
7896  lastOffset = suballoc.offset + suballoc.size;
7897  ++nextAlloc2ndIndex;
7898  }
7899  // We are at the end.
7900  else
7901  {
7902  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7903  if(lastOffset < freeSpace2ndTo1stEnd)
7904  {
7905  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7906  ++outInfo.unusedRangeCount;
7907  outInfo.unusedBytes += unusedRangeSize;
7908  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7909  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7910  }
7911 
7912  // End of loop.
7913  lastOffset = freeSpace2ndTo1stEnd;
7914  }
7915  }
7916  }
7917 
7918  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7919  const VkDeviceSize freeSpace1stTo2ndEnd =
7920  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7921  while(lastOffset < freeSpace1stTo2ndEnd)
7922  {
7923  // Find next non-null allocation or move nextAlloc1stIndex to the end.
7924  while(nextAlloc1stIndex < suballoc1stCount &&
7925  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7926  {
7927  ++nextAlloc1stIndex;
7928  }
7929 
7930  // Found non-null allocation.
7931  if(nextAlloc1stIndex < suballoc1stCount)
7932  {
7933  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7934 
7935  // 1. Process free space before this allocation.
7936  if(lastOffset < suballoc.offset)
7937  {
7938  // There is free space from lastOffset to suballoc.offset.
7939  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7940  ++outInfo.unusedRangeCount;
7941  outInfo.unusedBytes += unusedRangeSize;
7942  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7943  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7944  }
7945 
7946  // 2. Process this allocation.
7947  // There is allocation with suballoc.offset, suballoc.size.
7948  outInfo.usedBytes += suballoc.size;
7949  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7950  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7951 
7952  // 3. Prepare for next iteration.
7953  lastOffset = suballoc.offset + suballoc.size;
7954  ++nextAlloc1stIndex;
7955  }
7956  // We are at the end.
7957  else
7958  {
7959  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7960  if(lastOffset < freeSpace1stTo2ndEnd)
7961  {
7962  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7963  ++outInfo.unusedRangeCount;
7964  outInfo.unusedBytes += unusedRangeSize;
7965  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7966  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7967  }
7968 
7969  // End of loop.
7970  lastOffset = freeSpace1stTo2ndEnd;
7971  }
7972  }
7973 
7974  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7975  {
7976  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7977  while(lastOffset < size)
7978  {
7979  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
7980  while(nextAlloc2ndIndex != SIZE_MAX &&
7981  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7982  {
7983  --nextAlloc2ndIndex;
7984  }
7985 
7986  // Found non-null allocation.
7987  if(nextAlloc2ndIndex != SIZE_MAX)
7988  {
7989  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7990 
7991  // 1. Process free space before this allocation.
7992  if(lastOffset < suballoc.offset)
7993  {
7994  // There is free space from lastOffset to suballoc.offset.
7995  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7996  ++outInfo.unusedRangeCount;
7997  outInfo.unusedBytes += unusedRangeSize;
7998  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7999  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8000  }
8001 
8002  // 2. Process this allocation.
8003  // There is allocation with suballoc.offset, suballoc.size.
8004  outInfo.usedBytes += suballoc.size;
8005  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8006  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8007 
8008  // 3. Prepare for next iteration.
8009  lastOffset = suballoc.offset + suballoc.size;
8010  --nextAlloc2ndIndex;
8011  }
8012  // We are at the end.
8013  else
8014  {
8015  // There is free space from lastOffset to size.
8016  if(lastOffset < size)
8017  {
8018  const VkDeviceSize unusedRangeSize = size - lastOffset;
8019  ++outInfo.unusedRangeCount;
8020  outInfo.unusedBytes += unusedRangeSize;
8021  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8022  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8023  }
8024 
8025  // End of loop.
8026  lastOffset = size;
8027  }
8028  }
8029  }
8030 
8031  outInfo.unusedBytes = size - outInfo.usedBytes;
8032 }
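// Editor's note: the walk above visits up to three disjoint regions in address
// order - the ring-buffer part of 2nd (offsets below the first live item of
// 1st), then 1st itself, then the upper stack of 2nd traversed back-to-front,
// which is front-to-back in address order. E.g. in DOUBLE_STACK mode, if 1st
// ends at offset 100 and suballocations2nd.back().offset == 192, the gap
// [100, 192) is counted exactly once, by the 1st-vector loop.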
8033 
8034 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8035 {
8036  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8037  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8038  const VkDeviceSize size = GetSize();
8039  const size_t suballoc1stCount = suballocations1st.size();
8040  const size_t suballoc2ndCount = suballocations2nd.size();
8041 
8042  inoutStats.size += size;
8043 
8044  VkDeviceSize lastOffset = 0;
8045 
8046  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8047  {
8048  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8049  size_t nextAlloc2ndIndex = 0;
8050  while(lastOffset < freeSpace2ndTo1stEnd)
8051  {
8052  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8053  while(nextAlloc2ndIndex < suballoc2ndCount &&
8054  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8055  {
8056  ++nextAlloc2ndIndex;
8057  }
8058 
8059  // Found non-null allocation.
8060  if(nextAlloc2ndIndex < suballoc2ndCount)
8061  {
8062  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8063 
8064  // 1. Process free space before this allocation.
8065  if(lastOffset < suballoc.offset)
8066  {
8067  // There is free space from lastOffset to suballoc.offset.
8068  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8069  inoutStats.unusedSize += unusedRangeSize;
8070  ++inoutStats.unusedRangeCount;
8071  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8072  }
8073 
8074  // 2. Process this allocation.
8075  // There is allocation with suballoc.offset, suballoc.size.
8076  ++inoutStats.allocationCount;
8077 
8078  // 3. Prepare for next iteration.
8079  lastOffset = suballoc.offset + suballoc.size;
8080  ++nextAlloc2ndIndex;
8081  }
8082  // We are at the end.
8083  else
8084  {
8085  if(lastOffset < freeSpace2ndTo1stEnd)
8086  {
8087  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8088  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8089  inoutStats.unusedSize += unusedRangeSize;
8090  ++inoutStats.unusedRangeCount;
8091  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8092  }
8093 
8094  // End of loop.
8095  lastOffset = freeSpace2ndTo1stEnd;
8096  }
8097  }
8098  }
8099 
8100  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8101  const VkDeviceSize freeSpace1stTo2ndEnd =
8102  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8103  while(lastOffset < freeSpace1stTo2ndEnd)
8104  {
8105  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8106  while(nextAlloc1stIndex < suballoc1stCount &&
8107  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8108  {
8109  ++nextAlloc1stIndex;
8110  }
8111 
8112  // Found non-null allocation.
8113  if(nextAlloc1stIndex < suballoc1stCount)
8114  {
8115  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8116 
8117  // 1. Process free space before this allocation.
8118  if(lastOffset < suballoc.offset)
8119  {
8120  // There is free space from lastOffset to suballoc.offset.
8121  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8122  inoutStats.unusedSize += unusedRangeSize;
8123  ++inoutStats.unusedRangeCount;
8124  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8125  }
8126 
8127  // 2. Process this allocation.
8128  // There is allocation with suballoc.offset, suballoc.size.
8129  ++inoutStats.allocationCount;
8130 
8131  // 3. Prepare for next iteration.
8132  lastOffset = suballoc.offset + suballoc.size;
8133  ++nextAlloc1stIndex;
8134  }
8135  // We are at the end.
8136  else
8137  {
8138  if(lastOffset < freeSpace1stTo2ndEnd)
8139  {
8140  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8141  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8142  inoutStats.unusedSize += unusedRangeSize;
8143  ++inoutStats.unusedRangeCount;
8144  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8145  }
8146 
8147  // End of loop.
8148  lastOffset = freeSpace1stTo2ndEnd;
8149  }
8150  }
8151 
8152  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8153  {
8154  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8155  while(lastOffset < size)
8156  {
8157  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
8158  while(nextAlloc2ndIndex != SIZE_MAX &&
8159  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8160  {
8161  --nextAlloc2ndIndex;
8162  }
8163 
8164  // Found non-null allocation.
8165  if(nextAlloc2ndIndex != SIZE_MAX)
8166  {
8167  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8168 
8169  // 1. Process free space before this allocation.
8170  if(lastOffset < suballoc.offset)
8171  {
8172  // There is free space from lastOffset to suballoc.offset.
8173  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8174  inoutStats.unusedSize += unusedRangeSize;
8175  ++inoutStats.unusedRangeCount;
8176  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8177  }
8178 
8179  // 2. Process this allocation.
8180  // There is allocation with suballoc.offset, suballoc.size.
8181  ++inoutStats.allocationCount;
8182 
8183  // 3. Prepare for next iteration.
8184  lastOffset = suballoc.offset + suballoc.size;
8185  --nextAlloc2ndIndex;
8186  }
8187  // We are at the end.
8188  else
8189  {
8190  if(lastOffset < size)
8191  {
8192  // There is free space from lastOffset to size.
8193  const VkDeviceSize unusedRangeSize = size - lastOffset;
8194  inoutStats.unusedSize += unusedRangeSize;
8195  ++inoutStats.unusedRangeCount;
8196  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8197  }
8198 
8199  // End of loop.
8200  lastOffset = size;
8201  }
8202  }
8203  }
8204 }
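// Editor's note: AddPoolStats repeats the same three-region walk as
// CalcAllocationStatInfo above, but accumulates into an existing VmaPoolStats
// with += instead of overwriting a VmaStatInfo, so it can be invoked once per
// block and summed over an entire pool.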
8205 
8206 #if VMA_STATS_STRING_ENABLED
8207 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8208 {
8209  const VkDeviceSize size = GetSize();
8210  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8211  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8212  const size_t suballoc1stCount = suballocations1st.size();
8213  const size_t suballoc2ndCount = suballocations2nd.size();
8214 
8215  // FIRST PASS
8216 
8217  size_t unusedRangeCount = 0;
8218  VkDeviceSize usedBytes = 0;
8219 
8220  VkDeviceSize lastOffset = 0;
8221 
8222  size_t alloc2ndCount = 0;
8223  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8224  {
8225  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8226  size_t nextAlloc2ndIndex = 0;
8227  while(lastOffset < freeSpace2ndTo1stEnd)
8228  {
8229  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8230  while(nextAlloc2ndIndex < suballoc2ndCount &&
8231  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8232  {
8233  ++nextAlloc2ndIndex;
8234  }
8235 
8236  // Found non-null allocation.
8237  if(nextAlloc2ndIndex < suballoc2ndCount)
8238  {
8239  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8240 
8241  // 1. Process free space before this allocation.
8242  if(lastOffset < suballoc.offset)
8243  {
8244  // There is free space from lastOffset to suballoc.offset.
8245  ++unusedRangeCount;
8246  }
8247 
8248  // 2. Process this allocation.
8249  // There is allocation with suballoc.offset, suballoc.size.
8250  ++alloc2ndCount;
8251  usedBytes += suballoc.size;
8252 
8253  // 3. Prepare for next iteration.
8254  lastOffset = suballoc.offset + suballoc.size;
8255  ++nextAlloc2ndIndex;
8256  }
8257  // We are at the end.
8258  else
8259  {
8260  if(lastOffset < freeSpace2ndTo1stEnd)
8261  {
8262  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8263  ++unusedRangeCount;
8264  }
8265 
8266  // End of loop.
8267  lastOffset = freeSpace2ndTo1stEnd;
8268  }
8269  }
8270  }
8271 
8272  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8273  size_t alloc1stCount = 0;
8274  const VkDeviceSize freeSpace1stTo2ndEnd =
8275  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8276  while(lastOffset < freeSpace1stTo2ndEnd)
8277  {
8278  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8279  while(nextAlloc1stIndex < suballoc1stCount &&
8280  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8281  {
8282  ++nextAlloc1stIndex;
8283  }
8284 
8285  // Found non-null allocation.
8286  if(nextAlloc1stIndex < suballoc1stCount)
8287  {
8288  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8289 
8290  // 1. Process free space before this allocation.
8291  if(lastOffset < suballoc.offset)
8292  {
8293  // There is free space from lastOffset to suballoc.offset.
8294  ++unusedRangeCount;
8295  }
8296 
8297  // 2. Process this allocation.
8298  // There is allocation with suballoc.offset, suballoc.size.
8299  ++alloc1stCount;
8300  usedBytes += suballoc.size;
8301 
8302  // 3. Prepare for next iteration.
8303  lastOffset = suballoc.offset + suballoc.size;
8304  ++nextAlloc1stIndex;
8305  }
8306  // We are at the end.
8307  else
8308  {
8309  if(lastOffset < freeSpace1stTo2ndEnd)
8310  {
8311  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8312  ++unusedRangeCount;
8313  }
8314 
8315  // End of loop.
8316  lastOffset = freeSpace1stTo2ndEnd;
8317  }
8318  }
8319 
8320  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8321  {
8322  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8323  while(lastOffset < size)
8324  {
8325  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
8326  while(nextAlloc2ndIndex != SIZE_MAX &&
8327  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8328  {
8329  --nextAlloc2ndIndex;
8330  }
8331 
8332  // Found non-null allocation.
8333  if(nextAlloc2ndIndex != SIZE_MAX)
8334  {
8335  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8336 
8337  // 1. Process free space before this allocation.
8338  if(lastOffset < suballoc.offset)
8339  {
8340  // There is free space from lastOffset to suballoc.offset.
8341  ++unusedRangeCount;
8342  }
8343 
8344  // 2. Process this allocation.
8345  // There is allocation with suballoc.offset, suballoc.size.
8346  ++alloc2ndCount;
8347  usedBytes += suballoc.size;
8348 
8349  // 3. Prepare for next iteration.
8350  lastOffset = suballoc.offset + suballoc.size;
8351  --nextAlloc2ndIndex;
8352  }
8353  // We are at the end.
8354  else
8355  {
8356  if(lastOffset < size)
8357  {
8358  // There is free space from lastOffset to size.
8359  ++unusedRangeCount;
8360  }
8361 
8362  // End of loop.
8363  lastOffset = size;
8364  }
8365  }
8366  }
8367 
8368  const VkDeviceSize unusedBytes = size - usedBytes;
8369  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8370 
8371  // SECOND PASS
8372  lastOffset = 0;
8373 
8374  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8375  {
8376  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8377  size_t nextAlloc2ndIndex = 0;
8378  while(lastOffset < freeSpace2ndTo1stEnd)
8379  {
8380  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8381  while(nextAlloc2ndIndex < suballoc2ndCount &&
8382  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8383  {
8384  ++nextAlloc2ndIndex;
8385  }
8386 
8387  // Found non-null allocation.
8388  if(nextAlloc2ndIndex < suballoc2ndCount)
8389  {
8390  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8391 
8392  // 1. Process free space before this allocation.
8393  if(lastOffset < suballoc.offset)
8394  {
8395  // There is free space from lastOffset to suballoc.offset.
8396  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8397  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8398  }
8399 
8400  // 2. Process this allocation.
8401  // There is allocation with suballoc.offset, suballoc.size.
8402  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8403 
8404  // 3. Prepare for next iteration.
8405  lastOffset = suballoc.offset + suballoc.size;
8406  ++nextAlloc2ndIndex;
8407  }
8408  // We are at the end.
8409  else
8410  {
8411  if(lastOffset < freeSpace2ndTo1stEnd)
8412  {
8413  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8414  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8415  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8416  }
8417 
8418  // End of loop.
8419  lastOffset = freeSpace2ndTo1stEnd;
8420  }
8421  }
8422  }
8423 
8424  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8425  while(lastOffset < freeSpace1stTo2ndEnd)
8426  {
8427  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8428  while(nextAlloc1stIndex < suballoc1stCount &&
8429  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8430  {
8431  ++nextAlloc1stIndex;
8432  }
8433 
8434  // Found non-null allocation.
8435  if(nextAlloc1stIndex < suballoc1stCount)
8436  {
8437  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8438 
8439  // 1. Process free space before this allocation.
8440  if(lastOffset < suballoc.offset)
8441  {
8442  // There is free space from lastOffset to suballoc.offset.
8443  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8444  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8445  }
8446 
8447  // 2. Process this allocation.
8448  // There is allocation with suballoc.offset, suballoc.size.
8449  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8450 
8451  // 3. Prepare for next iteration.
8452  lastOffset = suballoc.offset + suballoc.size;
8453  ++nextAlloc1stIndex;
8454  }
8455  // We are at the end.
8456  else
8457  {
8458  if(lastOffset < freeSpace1stTo2ndEnd)
8459  {
8460  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8461  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8462  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8463  }
8464 
8465  // End of loop.
8466  lastOffset = freeSpace1stTo2ndEnd;
8467  }
8468  }
8469 
8470  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8471  {
8472  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8473  while(lastOffset < size)
8474  {
8475  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
8476  while(nextAlloc2ndIndex != SIZE_MAX &&
8477  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8478  {
8479  --nextAlloc2ndIndex;
8480  }
8481 
8482  // Found non-null allocation.
8483  if(nextAlloc2ndIndex != SIZE_MAX)
8484  {
8485  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8486 
8487  // 1. Process free space before this allocation.
8488  if(lastOffset < suballoc.offset)
8489  {
8490  // There is free space from lastOffset to suballoc.offset.
8491  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8492  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8493  }
8494 
8495  // 2. Process this allocation.
8496  // There is allocation with suballoc.offset, suballoc.size.
8497  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8498 
8499  // 3. Prepare for next iteration.
8500  lastOffset = suballoc.offset + suballoc.size;
8501  --nextAlloc2ndIndex;
8502  }
8503  // We are at the end.
8504  else
8505  {
8506  if(lastOffset < size)
8507  {
8508  // There is free space from lastOffset to size.
8509  const VkDeviceSize unusedRangeSize = size - lastOffset;
8510  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8511  }
8512 
8513  // End of loop.
8514  lastOffset = size;
8515  }
8516  }
8517  }
8518 
8519  PrintDetailedMap_End(json);
8520 }
8521 #endif // #if VMA_STATS_STRING_ENABLED
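// Editor's note: PrintDetailedMap is deliberately two-pass. The first pass only
// counts allocations and unused ranges because PrintDetailedMap_Begin() - which,
// judging from its arguments, emits the JSON header - needs those totals before
// any entries are written; the second pass repeats the identical walk to emit
// the entries themselves.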
8522 
8523 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8524  uint32_t currentFrameIndex,
8525  uint32_t frameInUseCount,
8526  VkDeviceSize bufferImageGranularity,
8527  VkDeviceSize allocSize,
8528  VkDeviceSize allocAlignment,
8529  bool upperAddress,
8530  VmaSuballocationType allocType,
8531  bool canMakeOtherLost,
8532  uint32_t strategy,
8533  VmaAllocationRequest* pAllocationRequest)
8534 {
8535  VMA_ASSERT(allocSize > 0);
8536  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8537  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8538  VMA_HEAVY_ASSERT(Validate());
8539 
8540  const VkDeviceSize size = GetSize();
8541  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8542  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8543 
8544  if(upperAddress)
8545  {
8546  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8547  {
8548  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8549  return false;
8550  }
8551 
8552  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8553  if(allocSize > size)
8554  {
8555  return false;
8556  }
8557  VkDeviceSize resultBaseOffset = size - allocSize;
8558  if(!suballocations2nd.empty())
8559  {
8560  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8561  resultBaseOffset = lastSuballoc.offset - allocSize;
8562  if(allocSize > lastSuballoc.offset)
8563  {
8564  return false;
8565  }
8566  }
8567 
8568  // Start from offset equal to end of free space.
8569  VkDeviceSize resultOffset = resultBaseOffset;
8570 
8571  // Apply VMA_DEBUG_MARGIN at the end.
8572  if(VMA_DEBUG_MARGIN > 0)
8573  {
8574  if(resultOffset < VMA_DEBUG_MARGIN)
8575  {
8576  return false;
8577  }
8578  resultOffset -= VMA_DEBUG_MARGIN;
8579  }
8580 
8581  // Apply alignment.
8582  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8583 
8584  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8585  // Make bigger alignment if necessary.
8586  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8587  {
8588  bool bufferImageGranularityConflict = false;
8589  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8590  {
8591  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8592  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8593  {
8594  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8595  {
8596  bufferImageGranularityConflict = true;
8597  break;
8598  }
8599  }
8600  else
8601  // Already on previous page.
8602  break;
8603  }
8604  if(bufferImageGranularityConflict)
8605  {
8606  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8607  }
8608  }
8609 
8610  // There is enough free space.
8611  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8612  suballocations1st.back().offset + suballocations1st.back().size :
8613  0;
8614  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8615  {
8616  // Check previous suballocations for BufferImageGranularity conflicts.
8617  // If conflict exists, allocation cannot be made here.
8618  if(bufferImageGranularity > 1)
8619  {
8620  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8621  {
8622  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8623  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8624  {
8625  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8626  {
8627  return false;
8628  }
8629  }
8630  else
8631  {
8632  // Already on next page.
8633  break;
8634  }
8635  }
8636  }
8637 
8638  // All tests passed: Success.
8639  pAllocationRequest->offset = resultOffset;
8640  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8641  pAllocationRequest->sumItemSize = 0;
8642  // pAllocationRequest->item unused.
8643  pAllocationRequest->itemsToMakeLostCount = 0;
8644  return true;
8645  }
8646  }
8647  else // !upperAddress
8648  {
8649  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8650  {
8651  // Try to allocate at the end of 1st vector.
8652 
8653  VkDeviceSize resultBaseOffset = 0;
8654  if(!suballocations1st.empty())
8655  {
8656  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8657  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8658  }
8659 
8660  // Start from offset equal to beginning of free space.
8661  VkDeviceSize resultOffset = resultBaseOffset;
8662 
8663  // Apply VMA_DEBUG_MARGIN at the beginning.
8664  if(VMA_DEBUG_MARGIN > 0)
8665  {
8666  resultOffset += VMA_DEBUG_MARGIN;
8667  }
8668 
8669  // Apply alignment.
8670  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8671 
8672  // Check previous suballocations for BufferImageGranularity conflicts.
8673  // Make bigger alignment if necessary.
8674  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8675  {
8676  bool bufferImageGranularityConflict = false;
8677  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8678  {
8679  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8680  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8681  {
8682  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8683  {
8684  bufferImageGranularityConflict = true;
8685  break;
8686  }
8687  }
8688  else
8689  // Already on previous page.
8690  break;
8691  }
8692  if(bufferImageGranularityConflict)
8693  {
8694  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8695  }
8696  }
8697 
8698  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8699  suballocations2nd.back().offset : size;
8700 
8701  // There is enough free space at the end after alignment.
8702  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8703  {
8704  // Check next suballocations for BufferImageGranularity conflicts.
8705  // If conflict exists, allocation cannot be made here.
8706  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8707  {
8708  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8709  {
8710  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8711  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8712  {
8713  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8714  {
8715  return false;
8716  }
8717  }
8718  else
8719  {
8720  // Already on previous page.
8721  break;
8722  }
8723  }
8724  }
8725 
8726  // All tests passed: Success.
8727  pAllocationRequest->offset = resultOffset;
8728  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8729  pAllocationRequest->sumItemSize = 0;
8730  // pAllocationRequest->item unused.
8731  pAllocationRequest->itemsToMakeLostCount = 0;
8732  return true;
8733  }
8734  }
8735 
8736  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
8737  // beginning of 1st vector as the end of free space.
8738  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8739  {
8740  VMA_ASSERT(!suballocations1st.empty());
8741 
8742  VkDeviceSize resultBaseOffset = 0;
8743  if(!suballocations2nd.empty())
8744  {
8745  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8746  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8747  }
8748 
8749  // Start from offset equal to beginning of free space.
8750  VkDeviceSize resultOffset = resultBaseOffset;
8751 
8752  // Apply VMA_DEBUG_MARGIN at the beginning.
8753  if(VMA_DEBUG_MARGIN > 0)
8754  {
8755  resultOffset += VMA_DEBUG_MARGIN;
8756  }
8757 
8758  // Apply alignment.
8759  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8760 
8761  // Check previous suballocations for BufferImageGranularity conflicts.
8762  // Make bigger alignment if necessary.
8763  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8764  {
8765  bool bufferImageGranularityConflict = false;
8766  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8767  {
8768  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8769  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8770  {
8771  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8772  {
8773  bufferImageGranularityConflict = true;
8774  break;
8775  }
8776  }
8777  else
8778  // Already on previous page.
8779  break;
8780  }
8781  if(bufferImageGranularityConflict)
8782  {
8783  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8784  }
8785  }
8786 
8787  pAllocationRequest->itemsToMakeLostCount = 0;
8788  pAllocationRequest->sumItemSize = 0;
8789  size_t index1st = m_1stNullItemsBeginCount;
8790 
8791  if(canMakeOtherLost)
8792  {
8793  while(index1st < suballocations1st.size() &&
8794  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8795  {
8796  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8797  const VmaSuballocation& suballoc = suballocations1st[index1st];
8798  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8799  {
8800  // No problem.
8801  }
8802  else
8803  {
8804  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8805  if(suballoc.hAllocation->CanBecomeLost() &&
8806  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8807  {
8808  ++pAllocationRequest->itemsToMakeLostCount;
8809  pAllocationRequest->sumItemSize += suballoc.size;
8810  }
8811  else
8812  {
8813  return false;
8814  }
8815  }
8816  ++index1st;
8817  }
8818 
8819  // Check next suballocations for BufferImageGranularity conflicts.
8820  // If conflict exists, we must mark more allocations lost or fail.
8821  if(bufferImageGranularity > 1)
8822  {
8823  while(index1st < suballocations1st.size())
8824  {
8825  const VmaSuballocation& suballoc = suballocations1st[index1st];
8826  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
8827  {
8828  if(suballoc.hAllocation != VK_NULL_HANDLE)
8829  {
8830  // Conservatively treated as a conflict, without checking the actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
8831  if(suballoc.hAllocation->CanBecomeLost() &&
8832  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8833  {
8834  ++pAllocationRequest->itemsToMakeLostCount;
8835  pAllocationRequest->sumItemSize += suballoc.size;
8836  }
8837  else
8838  {
8839  return false;
8840  }
8841  }
8842  }
8843  else
8844  {
8845  // Already on next page.
8846  break;
8847  }
8848  ++index1st;
8849  }
8850  }
8851  }
8852 
8853  // There is enough free space at the end after alignment.
8854  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
8855  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
8856  {
8857  // Check next suballocations for BufferImageGranularity conflicts.
8858  // If conflict exists, allocation cannot be made here.
8859  if(bufferImageGranularity > 1)
8860  {
8861  for(size_t nextSuballocIndex = index1st;
8862  nextSuballocIndex < suballocations1st.size();
8863  nextSuballocIndex++)
8864  {
8865  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
8866  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8867  {
8868  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8869  {
8870  return false;
8871  }
8872  }
8873  else
8874  {
8875  // Already on next page.
8876  break;
8877  }
8878  }
8879  }
8880 
8881  // All tests passed: Success.
8882  pAllocationRequest->offset = resultOffset;
8883  pAllocationRequest->sumFreeSize =
8884  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
8885  - resultBaseOffset
8886  - pAllocationRequest->sumItemSize;
8887  // pAllocationRequest->item unused.
8888  return true;
8889  }
8890  }
8891  }
8892 
8893  return false;
8894 }
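// Editor's sketch with illustrative values (assuming bufferImageGranularity == 1
// and VMA_DEBUG_MARGIN == 0): an upper-address request against a block of size
// 256 with allocSize == 32, allocAlignment == 16 and 2nd.back().offset == 200
// computes resultBaseOffset = 200 - 32 = 168, then VmaAlignDown(168, 16) == 160,
// and succeeds as long as 1st ends at or below offset 160; sumFreeSize is then
// 200 - endOf1st.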
8895 
8896 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
8897  uint32_t currentFrameIndex,
8898  uint32_t frameInUseCount,
8899  VmaAllocationRequest* pAllocationRequest)
8900 {
8901  if(pAllocationRequest->itemsToMakeLostCount == 0)
8902  {
8903  return true;
8904  }
8905 
8906  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
8907 
8908  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8909  size_t index1st = m_1stNullItemsBeginCount;
8910  size_t madeLostCount = 0;
8911  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
8912  {
8913  VMA_ASSERT(index1st < suballocations1st.size());
8914  VmaSuballocation& suballoc = suballocations1st[index1st];
8915  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8916  {
8917  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8918  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
8919  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8920  {
8921  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8922  suballoc.hAllocation = VK_NULL_HANDLE;
8923  m_SumFreeSize += suballoc.size;
8924  ++m_1stNullItemsMiddleCount;
8925  ++madeLostCount;
8926  }
8927  else
8928  {
8929  return false;
8930  }
8931  }
8932  ++index1st;
8933  }
8934 
8935  CleanupAfterFree();
8936  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
8937 
8938  return true;
8939 }
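// Editor's note: only items at the front of 1st can be made lost here, which
// matches CreateAllocationRequest above - itemsToMakeLostCount is only ever
// incremented while scanning 1st forward from m_1stNullItemsBeginCount in
// ring-buffer mode.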
8940 
8941 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8942 {
8943  uint32_t lostAllocationCount = 0;
8944 
8945  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8946  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8947  {
8948  VmaSuballocation& suballoc = suballocations1st[i];
8949  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8950  suballoc.hAllocation->CanBecomeLost() &&
8951  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8952  {
8953  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8954  suballoc.hAllocation = VK_NULL_HANDLE;
8955  ++m_1stNullItemsMiddleCount;
8956  m_SumFreeSize += suballoc.size;
8957  ++lostAllocationCount;
8958  }
8959  }
8960 
8961  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8962  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8963  {
8964  VmaSuballocation& suballoc = suballocations2nd[i];
8965  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8966  suballoc.hAllocation->CanBecomeLost() &&
8967  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8968  {
8969  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8970  suballoc.hAllocation = VK_NULL_HANDLE;
8971  ++m_2ndNullItemsCount;
8972  ++lostAllocationCount;
8973  }
8974  }
8975 
8976  if(lostAllocationCount)
8977  {
8978  CleanupAfterFree();
8979  }
8980 
8981  return lostAllocationCount;
8982 }
8983 
8984 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
8985 {
8986  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8987  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8988  {
8989  const VmaSuballocation& suballoc = suballocations1st[i];
8990  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8991  {
8992  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8993  {
8994  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8995  return VK_ERROR_VALIDATION_FAILED_EXT;
8996  }
8997  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8998  {
8999  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9000  return VK_ERROR_VALIDATION_FAILED_EXT;
9001  }
9002  }
9003  }
9004 
9005  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9006  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9007  {
9008  const VmaSuballocation& suballoc = suballocations2nd[i];
9009  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9010  {
9011  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9012  {
9013  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9014  return VK_ERROR_VALIDATION_FAILED_EXT;
9015  }
9016  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9017  {
9018  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9019  return VK_ERROR_VALIDATION_FAILED_EXT;
9020  }
9021  }
9022  }
9023 
9024  return VK_SUCCESS;
9025 }
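// Editor's note: this check assumes the corruption-detection configuration in
// which VMA_DEBUG_MARGIN > 0 and a magic value was written into the margin just
// before suballoc.offset and just after suballoc.offset + suballoc.size;
// VmaValidateMagicValue() then catches writes that strayed into those margins.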
9026 
9027 void VmaBlockMetadata_Linear::Alloc(
9028  const VmaAllocationRequest& request,
9029  VmaSuballocationType type,
9030  VkDeviceSize allocSize,
9031  bool upperAddress,
9032  VmaAllocation hAllocation)
9033 {
9034  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9035 
9036  if(upperAddress)
9037  {
9038  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9039  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9040  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9041  suballocations2nd.push_back(newSuballoc);
9042  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9043  }
9044  else
9045  {
9046  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9047 
9048  // First allocation.
9049  if(suballocations1st.empty())
9050  {
9051  suballocations1st.push_back(newSuballoc);
9052  }
9053  else
9054  {
9055  // New allocation at the end of 1st vector.
9056  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9057  {
9058  // Check if it fits before the end of the block.
9059  VMA_ASSERT(request.offset + allocSize <= GetSize());
9060  suballocations1st.push_back(newSuballoc);
9061  }
9062  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
9063  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
9064  {
9065  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9066 
9067  switch(m_2ndVectorMode)
9068  {
9069  case SECOND_VECTOR_EMPTY:
9070  // First allocation from second part ring buffer.
9071  VMA_ASSERT(suballocations2nd.empty());
9072  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
9073  break;
9074  case SECOND_VECTOR_RING_BUFFER:
9075  // 2-part ring buffer is already started.
9076  VMA_ASSERT(!suballocations2nd.empty());
9077  break;
9078  case SECOND_VECTOR_DOUBLE_STACK:
9079  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
9080  break;
9081  default:
9082  VMA_ASSERT(0);
9083  }
9084 
9085  suballocations2nd.push_back(newSuballoc);
9086  }
9087  else
9088  {
9089  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
9090  }
9091  }
9092  }
9093 
9094  m_SumFreeSize -= newSuballoc.size;
9095 }
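// Editor's sketch of the mode transitions driven by Alloc() above:
//   SECOND_VECTOR_EMPTY --(upperAddress)------------> SECOND_VECTOR_DOUBLE_STACK
//   SECOND_VECTOR_EMPTY --(wrap-around before 1st)--> SECOND_VECTOR_RING_BUFFER
// Each mode then asserts against requests belonging to the other, so a single
// block never mixes the double-stack and ring-buffer layouts.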
9096 
9097 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9098 {
9099  FreeAtOffset(allocation->GetOffset());
9100 }
9101 
9102 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
9103 {
9104  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9105  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9106 
9107  if(!suballocations1st.empty())
9108  {
9109  // First allocation: Mark it as next empty at the beginning.
9110  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9111  if(firstSuballoc.offset == offset)
9112  {
9113  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9114  firstSuballoc.hAllocation = VK_NULL_HANDLE;
9115  m_SumFreeSize += firstSuballoc.size;
9116  ++m_1stNullItemsBeginCount;
9117  CleanupAfterFree();
9118  return;
9119  }
9120  }
9121 
9122  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
9123  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
9124  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9125  {
9126  VmaSuballocation& lastSuballoc = suballocations2nd.back();
9127  if(lastSuballoc.offset == offset)
9128  {
9129  m_SumFreeSize += lastSuballoc.size;
9130  suballocations2nd.pop_back();
9131  CleanupAfterFree();
9132  return;
9133  }
9134  }
9135  // Last allocation in 1st vector.
9136  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
9137  {
9138  VmaSuballocation& lastSuballoc = suballocations1st.back();
9139  if(lastSuballoc.offset == offset)
9140  {
9141  m_SumFreeSize += lastSuballoc.size;
9142  suballocations1st.pop_back();
9143  CleanupAfterFree();
9144  return;
9145  }
9146  }
9147 
9148  // Item from the middle of 1st vector.
9149  {
9150  VmaSuballocation refSuballoc;
9151  refSuballoc.offset = offset;
9152  // Rest of members stays uninitialized intentionally for better performance.
9153  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
9154  suballocations1st.begin() + m_1stNullItemsBeginCount,
9155  suballocations1st.end(),
9156  refSuballoc);
9157  if(it != suballocations1st.end())
9158  {
9159  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9160  it->hAllocation = VK_NULL_HANDLE;
9161  ++m_1stNullItemsMiddleCount;
9162  m_SumFreeSize += it->size;
9163  CleanupAfterFree();
9164  return;
9165  }
9166  }
9167 
9168  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
9169  {
9170  // Item from the middle of 2nd vector.
9171  VmaSuballocation refSuballoc;
9172  refSuballoc.offset = offset;
9173  // Rest of members stays uninitialized intentionally for better performance.
9174  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
9175  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
9176  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
9177  if(it != suballocations2nd.end())
9178  {
9179  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9180  it->hAllocation = VK_NULL_HANDLE;
9181  ++m_2ndNullItemsCount;
9182  m_SumFreeSize += it->size;
9183  CleanupAfterFree();
9184  return;
9185  }
9186  }
9187 
9188  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
9189 }
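// Editor's note: FreeAtOffset tries the O(1) cases first (the oldest live item
// of 1st, then the newest item of 2nd or 1st), and only then falls back to
// binary search. Note the comparator switch: 2nd is sorted by increasing offset
// in RING_BUFFER mode but by decreasing offset in DOUBLE_STACK mode, hence
// VmaSuballocationOffsetLess versus VmaSuballocationOffsetGreater.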
9190 
9191 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9192 {
9193  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9194  const size_t suballocCount = AccessSuballocations1st().size();
9195  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9196 }
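// Editor's sketch: compaction triggers once more than 32 items are tracked and
// null items reach 1.5x the live ones (nullItemCount * 2 >= liveCount * 3).
// For example, 40 suballocations of which 24 are null and 16 live:
// 24 * 2 == 48 >= 16 * 3 == 48, so CleanupAfterFree() below compacts the
// vector down to the 16 live items.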
9197 
9198 void VmaBlockMetadata_Linear::CleanupAfterFree()
9199 {
9200  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9201  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9202 
9203  if(IsEmpty())
9204  {
9205  suballocations1st.clear();
9206  suballocations2nd.clear();
9207  m_1stNullItemsBeginCount = 0;
9208  m_1stNullItemsMiddleCount = 0;
9209  m_2ndNullItemsCount = 0;
9210  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9211  }
9212  else
9213  {
9214  const size_t suballoc1stCount = suballocations1st.size();
9215  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9216  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
9217 
9218  // Find more null items at the beginning of 1st vector.
9219  while(m_1stNullItemsBeginCount < suballoc1stCount &&
9220  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9221  {
9222  ++m_1stNullItemsBeginCount;
9223  --m_1stNullItemsMiddleCount;
9224  }
9225 
9226  // Find more null items at the end of 1st vector.
9227  while(m_1stNullItemsMiddleCount > 0 &&
9228  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
9229  {
9230  --m_1stNullItemsMiddleCount;
9231  suballocations1st.pop_back();
9232  }
9233 
9234  // Find more null items at the end of 2nd vector.
9235  while(m_2ndNullItemsCount > 0 &&
9236  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
9237  {
9238  --m_2ndNullItemsCount;
9239  suballocations2nd.pop_back();
9240  }
9241 
9242  if(ShouldCompact1st())
9243  {
9244  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
9245  size_t srcIndex = m_1stNullItemsBeginCount;
9246  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
9247  {
9248  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
9249  {
9250  ++srcIndex;
9251  }
9252  if(dstIndex != srcIndex)
9253  {
9254  suballocations1st[dstIndex] = suballocations1st[srcIndex];
9255  }
9256  ++srcIndex;
9257  }
9258  suballocations1st.resize(nonNullItemCount);
9259  m_1stNullItemsBeginCount = 0;
9260  m_1stNullItemsMiddleCount = 0;
9261  }
9262 
9263  // 2nd vector became empty.
9264  if(suballocations2nd.empty())
9265  {
9266  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9267  }
9268 
9269  // 1st vector became empty.
9270  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
9271  {
9272  suballocations1st.clear();
9273  m_1stNullItemsBeginCount = 0;
9274 
9275  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9276  {
9277  // Swap 1st with 2nd. Now 2nd is empty.
9278  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9279  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
9280  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
9281  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9282  {
9283  ++m_1stNullItemsBeginCount;
9284  --m_1stNullItemsMiddleCount;
9285  }
9286  m_2ndNullItemsCount = 0;
9287  m_1stVectorIndex ^= 1;
9288  }
9289  }
9290  }
9291 
9292  VMA_HEAVY_ASSERT(Validate());
9293 }
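// Editor's note: when 1st drains while 2nd still holds a ring buffer, no
// elements are copied - flipping m_1stVectorIndex promotes the old 2nd to be
// the new 1st in O(1), after which the null-item counters are recomputed for
// the swapped vector.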
9294 
9295 
9296 ////////////////////////////////////////////////////////////////////////////////
9297 // class VmaBlockMetadata_Buddy
9298 
9299 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
9300  VmaBlockMetadata(hAllocator),
9301  m_Root(VMA_NULL),
9302  m_AllocationCount(0),
9303  m_FreeCount(1),
9304  m_SumFreeSize(0)
9305 {
9306  memset(m_FreeList, 0, sizeof(m_FreeList));
9307 }
9308 
9309 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9310 {
9311  DeleteNode(m_Root);
9312 }
9313 
9314 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9315 {
9316  VmaBlockMetadata::Init(size);
9317 
9318  m_UsableSize = VmaPrevPow2(size);
9319  m_SumFreeSize = m_UsableSize;
9320 
9321  // Calculate m_LevelCount.
9322  m_LevelCount = 1;
9323  while(m_LevelCount < MAX_LEVELS &&
9324  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
9325  {
9326  ++m_LevelCount;
9327  }
9328 
9329  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
9330  rootNode->offset = 0;
9331  rootNode->type = Node::TYPE_FREE;
9332  rootNode->parent = VMA_NULL;
9333  rootNode->buddy = VMA_NULL;
9334 
9335  m_Root = rootNode;
9336  AddToFreeListFront(0, rootNode);
9337 }
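// Editor's sketch (illustrative numbers; the value of MIN_NODE_SIZE is
// assumed): for a block of 1000000 bytes, m_UsableSize =
// VmaPrevPow2(1000000) == 524288 (2^19), and the remaining 475712 bytes become
// the unusable tail reported by GetUnusableSize(). Each level below the root
// halves the node size; levels are added while the halved size stays at or
// above MIN_NODE_SIZE and MAX_LEVELS is not exceeded.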
9338 
9339 bool VmaBlockMetadata_Buddy::Validate() const
9340 {
9341  // Validate tree.
9342  ValidationContext ctx;
9343  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9344  {
9345  VMA_VALIDATE(false && "ValidateNode failed.");
9346  }
9347  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9348  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9349 
9350  // Validate free node lists.
9351  for(uint32_t level = 0; level < m_LevelCount; ++level)
9352  {
9353  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9354  m_FreeList[level].front->free.prev == VMA_NULL);
9355 
9356  for(Node* node = m_FreeList[level].front;
9357  node != VMA_NULL;
9358  node = node->free.next)
9359  {
9360  VMA_VALIDATE(node->type == Node::TYPE_FREE);
9361 
9362  if(node->free.next == VMA_NULL)
9363  {
9364  VMA_VALIDATE(m_FreeList[level].back == node);
9365  }
9366  else
9367  {
9368  VMA_VALIDATE(node->free.next->free.prev == node);
9369  }
9370  }
9371  }
9372 
9373  // Validate that free lists at higher levels are empty.
9374  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9375  {
9376  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9377  }
9378 
9379  return true;
9380 }
9381 
9382 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9383 {
9384  for(uint32_t level = 0; level < m_LevelCount; ++level)
9385  {
9386  if(m_FreeList[level].front != VMA_NULL)
9387  {
9388  return LevelToNodeSize(level);
9389  }
9390  }
9391  return 0;
9392 }
9393 
9394 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9395 {
9396  const VkDeviceSize unusableSize = GetUnusableSize();
9397 
9398  outInfo.blockCount = 1;
9399 
9400  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9401  outInfo.usedBytes = outInfo.unusedBytes = 0;
9402 
9403  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9404  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9405  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9406 
9407  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
9408 
9409  if(unusableSize > 0)
9410  {
9411  ++outInfo.unusedRangeCount;
9412  outInfo.unusedBytes += unusableSize;
9413  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
9414  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
9415  }
9416 }
9417 
9418 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9419 {
9420  const VkDeviceSize unusableSize = GetUnusableSize();
9421 
9422  inoutStats.size += GetSize();
9423  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9424  inoutStats.allocationCount += m_AllocationCount;
9425  inoutStats.unusedRangeCount += m_FreeCount;
9426  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9427 
9428  if(unusableSize > 0)
9429  {
9430  ++inoutStats.unusedRangeCount;
9431  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9432  }
9433 }
9434 
9435 #if VMA_STATS_STRING_ENABLED
9436 
9437 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9438 {
9439  // TODO optimize
9440  VmaStatInfo stat;
9441  CalcAllocationStatInfo(stat);
9442 
9443  PrintDetailedMap_Begin(
9444  json,
9445  stat.unusedBytes,
9446  stat.allocationCount,
9447  stat.unusedRangeCount);
9448 
9449  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9450 
9451  const VkDeviceSize unusableSize = GetUnusableSize();
9452  if(unusableSize > 0)
9453  {
9454  PrintDetailedMap_UnusedRange(json,
9455  m_UsableSize, // offset
9456  unusableSize); // size
9457  }
9458 
9459  PrintDetailedMap_End(json);
9460 }
9461 
9462 #endif // #if VMA_STATS_STRING_ENABLED
9463 
9464 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9465  uint32_t currentFrameIndex,
9466  uint32_t frameInUseCount,
9467  VkDeviceSize bufferImageGranularity,
9468  VkDeviceSize allocSize,
9469  VkDeviceSize allocAlignment,
9470  bool upperAddress,
9471  VmaSuballocationType allocType,
9472  bool canMakeOtherLost,
9473  uint32_t strategy,
9474  VmaAllocationRequest* pAllocationRequest)
9475 {
9476  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9477 
9478  // Simple way to respect bufferImageGranularity. May be optimized some day.
9479  // Whenever the allocation might be an OPTIMAL image, pad its size and alignment up to bufferImageGranularity.
9480  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9481  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9482  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9483  {
9484  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9485  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9486  }
9487 
9488  if(allocSize > m_UsableSize)
9489  {
9490  return false;
9491  }
9492 
9493  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9494  for(uint32_t level = targetLevel + 1; level--; )
9495  {
9496  for(Node* freeNode = m_FreeList[level].front;
9497  freeNode != VMA_NULL;
9498  freeNode = freeNode->free.next)
9499  {
9500  if(freeNode->offset % allocAlignment == 0)
9501  {
9502  pAllocationRequest->offset = freeNode->offset;
9503  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9504  pAllocationRequest->sumItemSize = 0;
9505  pAllocationRequest->itemsToMakeLostCount = 0;
9506  pAllocationRequest->customData = (void*)(uintptr_t)level;
9507  return true;
9508  }
9509  }
9510  }
9511 
9512  return false;
9513 }
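// Editor's note: AllocSizeToLevel() is assumed to map a size to the deepest
// level whose nodes can still hold it. The loop then searches from targetLevel
// toward the root, so the first suitably aligned free node found is the
// smallest available one, and Alloc() below splits it back down to targetLevel.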
9514 
9515 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9516  uint32_t currentFrameIndex,
9517  uint32_t frameInUseCount,
9518  VmaAllocationRequest* pAllocationRequest)
9519 {
9520  /*
9521  Lost allocations are not supported in buddy allocator at the moment.
9522  Support might be added in the future.
9523  */
9524  return pAllocationRequest->itemsToMakeLostCount == 0;
9525 }
9526 
9527 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9528 {
9529  /*
9530  Lost allocations are not supported in buddy allocator at the moment.
9531  Support might be added in the future.
9532  */
9533  return 0;
9534 }
9535 
9536 void VmaBlockMetadata_Buddy::Alloc(
9537  const VmaAllocationRequest& request,
9538  VmaSuballocationType type,
9539  VkDeviceSize allocSize,
9540  bool upperAddress,
9541  VmaAllocation hAllocation)
9542 {
9543  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9544  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9545 
9546  Node* currNode = m_FreeList[currLevel].front;
9547  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9548  while(currNode->offset != request.offset)
9549  {
9550  currNode = currNode->free.next;
9551  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9552  }
9553 
9554  // Go down, splitting free nodes.
9555  while(currLevel < targetLevel)
9556  {
9557  // currNode is already first free node at currLevel.
9558  // Remove it from list of free nodes at this currLevel.
9559  RemoveFromFreeList(currLevel, currNode);
9560 
9561  const uint32_t childrenLevel = currLevel + 1;
9562 
9563  // Create two free sub-nodes.
9564  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
9565  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
9566 
9567  leftChild->offset = currNode->offset;
9568  leftChild->type = Node::TYPE_FREE;
9569  leftChild->parent = currNode;
9570  leftChild->buddy = rightChild;
9571 
9572  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9573  rightChild->type = Node::TYPE_FREE;
9574  rightChild->parent = currNode;
9575  rightChild->buddy = leftChild;
9576 
9577  // Convert current currNode to split type.
9578  currNode->type = Node::TYPE_SPLIT;
9579  currNode->split.leftChild = leftChild;
9580 
9581  // Add child nodes to free list. Order is important!
9582  AddToFreeListFront(childrenLevel, rightChild);
9583  AddToFreeListFront(childrenLevel, leftChild);
9584 
9585  ++m_FreeCount;
9586  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be a non-power of 2.
9587  ++currLevel;
9588  currNode = m_FreeList[currLevel].front;
9589 
9590  /*
9591  We can be sure that currNode, as left child of node previously split,
9592  also fulfills the alignment requirement.
9593  */
9594  }
9595 
9596  // Remove from free list.
9597  VMA_ASSERT(currLevel == targetLevel &&
9598  currNode != VMA_NULL &&
9599  currNode->type == Node::TYPE_FREE);
9600  RemoveFromFreeList(currLevel, currNode);
9601 
9602  // Convert to allocation node.
9603  currNode->type = Node::TYPE_ALLOCATION;
9604  currNode->allocation.alloc = hAllocation;
9605 
9606  ++m_AllocationCount;
9607  --m_FreeCount;
9608  m_SumFreeSize -= allocSize;
9609 }
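// Worked example of the splitting loop above, assuming a 256-unit usable size: a
// 32-unit request has targetLevel = 3. If the chosen free node is the whole
// 256-unit root (level 0), the loop splits it into 128+128, the left 128 into
// 64+64, and the left 64 into 32+32, then marks the left 32-unit node as
// TYPE_ALLOCATION. Each split turns one free node into two, hence the single
// ++m_FreeCount per pass.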
9610 
9611 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9612 {
9613  if(node->type == Node::TYPE_SPLIT)
9614  {
9615  DeleteNode(node->split.leftChild->buddy);
9616  DeleteNode(node->split.leftChild);
9617  }
9618 
9619  vma_delete(GetAllocationCallbacks(), node);
9620 }
9621 
9622 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9623 {
9624  VMA_VALIDATE(level < m_LevelCount);
9625  VMA_VALIDATE(curr->parent == parent);
9626  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9627  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9628  switch(curr->type)
9629  {
9630  case Node::TYPE_FREE:
9631  // curr->free.prev, next are validated separately.
9632  ctx.calculatedSumFreeSize += levelNodeSize;
9633  ++ctx.calculatedFreeCount;
9634  break;
9635  case Node::TYPE_ALLOCATION:
9636  ++ctx.calculatedAllocationCount;
9637  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9638  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
9639  break;
9640  case Node::TYPE_SPLIT:
9641  {
9642  const uint32_t childrenLevel = level + 1;
9643  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9644  const Node* const leftChild = curr->split.leftChild;
9645  VMA_VALIDATE(leftChild != VMA_NULL);
9646  VMA_VALIDATE(leftChild->offset == curr->offset);
9647  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9648  {
9649  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9650  }
9651  const Node* const rightChild = leftChild->buddy;
9652  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9653  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9654  {
9655  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9656  }
9657  }
9658  break;
9659  default:
9660  return false;
9661  }
9662 
9663  return true;
9664 }
9665 
9666 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9667 {
9668  // I know this could be optimized somehow, e.g. by using std::log2p1 from C++20.
9669  uint32_t level = 0;
9670  VkDeviceSize currLevelNodeSize = m_UsableSize;
9671  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9672  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9673  {
9674  ++level;
9675  currLevelNodeSize = nextLevelNodeSize;
9676  nextLevelNodeSize = currLevelNodeSize >> 1;
9677  }
9678  return level;
9679 }
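// Example, derived from the loop above: with m_UsableSize = 256 the level node
// sizes are 256, 128, 64, 32, ... A request of 48 returns level 2, because 48 fits
// in a 64-unit node (48 <= 64) but not in a 32-unit one (48 <= 32 is false).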
9680 
9681 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9682 {
9683  // Find node and level.
9684  Node* node = m_Root;
9685  VkDeviceSize nodeOffset = 0;
9686  uint32_t level = 0;
9687  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9688  while(node->type == Node::TYPE_SPLIT)
9689  {
9690  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9691  if(offset < nodeOffset + nextLevelSize)
9692  {
9693  node = node->split.leftChild;
9694  }
9695  else
9696  {
9697  node = node->split.leftChild->buddy;
9698  nodeOffset += nextLevelSize;
9699  }
9700  ++level;
9701  levelNodeSize = nextLevelSize;
9702  }
9703 
9704  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9705  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9706 
9707  ++m_FreeCount;
9708  --m_AllocationCount;
9709  m_SumFreeSize += alloc->GetSize();
9710 
9711  node->type = Node::TYPE_FREE;
9712 
9713  // Join free nodes if possible.
9714  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9715  {
9716  RemoveFromFreeList(level, node->buddy);
9717  Node* const parent = node->parent;
9718 
9719  vma_delete(GetAllocationCallbacks(), node->buddy);
9720  vma_delete(GetAllocationCallbacks(), node);
9721  parent->type = Node::TYPE_FREE;
9722 
9723  node = parent;
9724  --level;
9725  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be a non-power of 2.
9726  --m_FreeCount;
9727  }
9728 
9729  AddToFreeListFront(level, node);
9730 }
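// The descent above locates the allocated node purely from its offset, halving the
// node size at each split level. The merge loop that follows is the classic buddy
// coalescing step: e.g. freeing the second of two adjacent 32-unit allocations
// collapses both halves back into one free 64-unit node, and the merge continues
// upward as long as the merged node's buddy is also free.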
9731 
9732 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9733 {
9734  switch(node->type)
9735  {
9736  case Node::TYPE_FREE:
9737  ++outInfo.unusedRangeCount;
9738  outInfo.unusedBytes += levelNodeSize;
9739  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9740  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
9741  break;
9742  case Node::TYPE_ALLOCATION:
9743  {
9744  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9745  ++outInfo.allocationCount;
9746  outInfo.usedBytes += allocSize;
9747  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
9748  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
9749 
9750  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9751  if(unusedRangeSize > 0)
9752  {
9753  ++outInfo.unusedRangeCount;
9754  outInfo.unusedBytes += unusedRangeSize;
9755  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9756  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9757  }
9758  }
9759  break;
9760  case Node::TYPE_SPLIT:
9761  {
9762  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9763  const Node* const leftChild = node->split.leftChild;
9764  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9765  const Node* const rightChild = leftChild->buddy;
9766  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9767  }
9768  break;
9769  default:
9770  VMA_ASSERT(0);
9771  }
9772 }
9773 
9774 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9775 {
9776  VMA_ASSERT(node->type == Node::TYPE_FREE);
9777 
9778  // List is empty.
9779  Node* const frontNode = m_FreeList[level].front;
9780  if(frontNode == VMA_NULL)
9781  {
9782  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9783  node->free.prev = node->free.next = VMA_NULL;
9784  m_FreeList[level].front = m_FreeList[level].back = node;
9785  }
9786  else
9787  {
9788  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9789  node->free.prev = VMA_NULL;
9790  node->free.next = frontNode;
9791  frontNode->free.prev = node;
9792  m_FreeList[level].front = node;
9793  }
9794 }
9795 
9796 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9797 {
9798  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9799 
9800  // It is at the front.
9801  if(node->free.prev == VMA_NULL)
9802  {
9803  VMA_ASSERT(m_FreeList[level].front == node);
9804  m_FreeList[level].front = node->free.next;
9805  }
9806  else
9807  {
9808  Node* const prevFreeNode = node->free.prev;
9809  VMA_ASSERT(prevFreeNode->free.next == node);
9810  prevFreeNode->free.next = node->free.next;
9811  }
9812 
9813  // It is at the back.
9814  if(node->free.next == VMA_NULL)
9815  {
9816  VMA_ASSERT(m_FreeList[level].back == node);
9817  m_FreeList[level].back = node->free.prev;
9818  }
9819  else
9820  {
9821  Node* const nextFreeNode = node->free.next;
9822  VMA_ASSERT(nextFreeNode->free.prev == node);
9823  nextFreeNode->free.prev = node->free.prev;
9824  }
9825 }
9826 
9827 #if VMA_STATS_STRING_ENABLED
9828 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
9829 {
9830  switch(node->type)
9831  {
9832  case Node::TYPE_FREE:
9833  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
9834  break;
9835  case Node::TYPE_ALLOCATION:
9836  {
9837  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
9838  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9839  if(allocSize < levelNodeSize)
9840  {
9841  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
9842  }
9843  }
9844  break;
9845  case Node::TYPE_SPLIT:
9846  {
9847  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9848  const Node* const leftChild = node->split.leftChild;
9849  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
9850  const Node* const rightChild = leftChild->buddy;
9851  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
9852  }
9853  break;
9854  default:
9855  VMA_ASSERT(0);
9856  }
9857 }
9858 #endif // #if VMA_STATS_STRING_ENABLED
9859 
9860 
9861 ////////////////////////////////////////////////////////////////////////////////
9862 // class VmaDeviceMemoryBlock
9863 
9864 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
9865  m_pMetadata(VMA_NULL),
9866  m_MemoryTypeIndex(UINT32_MAX),
9867  m_Id(0),
9868  m_hMemory(VK_NULL_HANDLE),
9869  m_MapCount(0),
9870  m_pMappedData(VMA_NULL)
9871 {
9872 }
9873 
9874 void VmaDeviceMemoryBlock::Init(
9875  VmaAllocator hAllocator,
9876  uint32_t newMemoryTypeIndex,
9877  VkDeviceMemory newMemory,
9878  VkDeviceSize newSize,
9879  uint32_t id,
9880  uint32_t algorithm)
9881 {
9882  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
9883 
9884  m_MemoryTypeIndex = newMemoryTypeIndex;
9885  m_Id = id;
9886  m_hMemory = newMemory;
9887 
9888  switch(algorithm)
9889  {
9890  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
9891  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
9892  break;
9893  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
9894  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
9895  break;
9896  default:
9897  VMA_ASSERT(0);
9898  // Fall-through.
9899  case 0:
9900  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
9901  }
9902  m_pMetadata->Init(newSize);
9903 }
9904 
9905 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
9906 {
9907  // This is the most important assert in the entire library.
9908  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
9909  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
9910 
9911  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
9912  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
9913  m_hMemory = VK_NULL_HANDLE;
9914 
9915  vma_delete(allocator, m_pMetadata);
9916  m_pMetadata = VMA_NULL;
9917 }
9918 
9919 bool VmaDeviceMemoryBlock::Validate() const
9920 {
9921  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
9922  (m_pMetadata->GetSize() != 0));
9923 
9924  return m_pMetadata->Validate();
9925 }
9926 
9927 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
9928 {
9929  void* pData = nullptr;
9930  VkResult res = Map(hAllocator, 1, &pData);
9931  if(res != VK_SUCCESS)
9932  {
9933  return res;
9934  }
9935 
9936  res = m_pMetadata->CheckCorruption(pData);
9937 
9938  Unmap(hAllocator, 1);
9939 
9940  return res;
9941 }
9942 
9943 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
9944 {
9945  if(count == 0)
9946  {
9947  return VK_SUCCESS;
9948  }
9949 
9950  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9951  if(m_MapCount != 0)
9952  {
9953  m_MapCount += count;
9954  VMA_ASSERT(m_pMappedData != VMA_NULL);
9955  if(ppData != VMA_NULL)
9956  {
9957  *ppData = m_pMappedData;
9958  }
9959  return VK_SUCCESS;
9960  }
9961  else
9962  {
9963  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
9964  hAllocator->m_hDevice,
9965  m_hMemory,
9966  0, // offset
9967  VK_WHOLE_SIZE,
9968  0, // flags
9969  &m_pMappedData);
9970  if(result == VK_SUCCESS)
9971  {
9972  if(ppData != VMA_NULL)
9973  {
9974  *ppData = m_pMappedData;
9975  }
9976  m_MapCount = count;
9977  }
9978  return result;
9979  }
9980 }
9981 
9982 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
9983 {
9984  if(count == 0)
9985  {
9986  return;
9987  }
9988 
9989  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9990  if(m_MapCount >= count)
9991  {
9992  m_MapCount -= count;
9993  if(m_MapCount == 0)
9994  {
9995  m_pMappedData = VMA_NULL;
9996  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
9997  }
9998  }
9999  else
10000  {
10001  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10002  }
10003 }
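// Usage sketch of the reference-counted mapping above (illustrative only, assuming
// a valid allocator handle and an initialized block):
//
//     void* pData = VMA_NULL;
//     block.Map(hAllocator, 1, &pData);  // m_MapCount 0 -> 1: calls vkMapMemory.
//     block.Map(hAllocator, 1, &pData);  // m_MapCount 1 -> 2: reuses m_pMappedData.
//     block.Unmap(hAllocator, 1);        // m_MapCount 2 -> 1: no Vulkan call.
//     block.Unmap(hAllocator, 1);        // m_MapCount 1 -> 0: calls vkUnmapMemory.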
10004 
10005 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10006 {
10007  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10008  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10009 
10010  void* pData;
10011  VkResult res = Map(hAllocator, 1, &pData);
10012  if(res != VK_SUCCESS)
10013  {
10014  return res;
10015  }
10016 
10017  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10018  VmaWriteMagicValue(pData, allocOffset + allocSize);
10019 
10020  Unmap(hAllocator, 1);
10021 
10022  return VK_SUCCESS;
10023 }
10024 
10025 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10026 {
10027  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10028  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10029 
10030  void* pData;
10031  VkResult res = Map(hAllocator, 1, &pData);
10032  if(res != VK_SUCCESS)
10033  {
10034  return res;
10035  }
10036 
10037  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10038  {
10039  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10040  }
10041  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10042  {
10043  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10044  }
10045 
10046  Unmap(hAllocator, 1);
10047 
10048  return VK_SUCCESS;
10049 }
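// Together, WriteMagicValueAroundAllocation and ValidateMagicValueAroundAllocation
// implement corruption detection: when VMA_DEBUG_MARGIN and
// VMA_DEBUG_DETECT_CORRUPTION are enabled, a magic number pattern is written into
// the margins directly before and after an allocation at creation time and
// re-checked when the allocation is freed, so an out-of-bounds write into either
// margin trips one of the asserts above.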
10050 
10051 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10052  const VmaAllocator hAllocator,
10053  const VmaAllocation hAllocation,
10054  VkBuffer hBuffer)
10055 {
10056  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10057  hAllocation->GetBlock() == this);
10058  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10059  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10060  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10061  hAllocator->m_hDevice,
10062  hBuffer,
10063  m_hMemory,
10064  hAllocation->GetOffset());
10065 }
10066 
10067 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10068  const VmaAllocator hAllocator,
10069  const VmaAllocation hAllocation,
10070  VkImage hImage)
10071 {
10072  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10073  hAllocation->GetBlock() == this);
10074  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10075  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10076  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10077  hAllocator->m_hDevice,
10078  hImage,
10079  m_hMemory,
10080  hAllocation->GetOffset());
10081 }
10082 
10083 static void InitStatInfo(VmaStatInfo& outInfo)
10084 {
10085  memset(&outInfo, 0, sizeof(outInfo));
10086  outInfo.allocationSizeMin = UINT64_MAX;
10087  outInfo.unusedRangeSizeMin = UINT64_MAX;
10088 }
10089 
10090 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10091 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10092 {
10093  inoutInfo.blockCount += srcInfo.blockCount;
10094  inoutInfo.allocationCount += srcInfo.allocationCount;
10095  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10096  inoutInfo.usedBytes += srcInfo.usedBytes;
10097  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10098  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10099  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10100  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10101  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10102 }
10103 
10104 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10105 {
10106  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10107  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10108  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10109  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10110 }
10111 
10112 VmaPool_T::VmaPool_T(
10113  VmaAllocator hAllocator,
10114  const VmaPoolCreateInfo& createInfo,
10115  VkDeviceSize preferredBlockSize) :
10116  m_BlockVector(
10117  hAllocator,
10118  createInfo.memoryTypeIndex,
10119  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
10120  createInfo.minBlockCount,
10121  createInfo.maxBlockCount,
10122  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
10123  createInfo.frameInUseCount,
10124  true, // isCustomPool
10125  createInfo.blockSize != 0, // explicitBlockSize
10126  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
10127  m_Id(0)
10128 {
10129 }
10130 
10131 VmaPool_T::~VmaPool_T()
10132 {
10133 }
10134 
10135 #if VMA_STATS_STRING_ENABLED
10136 
10137 #endif // #if VMA_STATS_STRING_ENABLED
10138 
10139 VmaBlockVector::VmaBlockVector(
10140  VmaAllocator hAllocator,
10141  uint32_t memoryTypeIndex,
10142  VkDeviceSize preferredBlockSize,
10143  size_t minBlockCount,
10144  size_t maxBlockCount,
10145  VkDeviceSize bufferImageGranularity,
10146  uint32_t frameInUseCount,
10147  bool isCustomPool,
10148  bool explicitBlockSize,
10149  uint32_t algorithm) :
10150  m_hAllocator(hAllocator),
10151  m_MemoryTypeIndex(memoryTypeIndex),
10152  m_PreferredBlockSize(preferredBlockSize),
10153  m_MinBlockCount(minBlockCount),
10154  m_MaxBlockCount(maxBlockCount),
10155  m_BufferImageGranularity(bufferImageGranularity),
10156  m_FrameInUseCount(frameInUseCount),
10157  m_IsCustomPool(isCustomPool),
10158  m_ExplicitBlockSize(explicitBlockSize),
10159  m_Algorithm(algorithm),
10160  m_HasEmptyBlock(false),
10161  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
10162  m_pDefragmentator(VMA_NULL),
10163  m_NextBlockId(0)
10164 {
10165 }
10166 
10167 VmaBlockVector::~VmaBlockVector()
10168 {
10169  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10170 
10171  for(size_t i = m_Blocks.size(); i--; )
10172  {
10173  m_Blocks[i]->Destroy(m_hAllocator);
10174  vma_delete(m_hAllocator, m_Blocks[i]);
10175  }
10176 }
10177 
10178 VkResult VmaBlockVector::CreateMinBlocks()
10179 {
10180  for(size_t i = 0; i < m_MinBlockCount; ++i)
10181  {
10182  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10183  if(res != VK_SUCCESS)
10184  {
10185  return res;
10186  }
10187  }
10188  return VK_SUCCESS;
10189 }
10190 
10191 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10192 {
10193  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10194 
10195  const size_t blockCount = m_Blocks.size();
10196 
10197  pStats->size = 0;
10198  pStats->unusedSize = 0;
10199  pStats->allocationCount = 0;
10200  pStats->unusedRangeCount = 0;
10201  pStats->unusedRangeSizeMax = 0;
10202  pStats->blockCount = blockCount;
10203 
10204  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10205  {
10206  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10207  VMA_ASSERT(pBlock);
10208  VMA_HEAVY_ASSERT(pBlock->Validate());
10209  pBlock->m_pMetadata->AddPoolStats(*pStats);
10210  }
10211 }
10212 
10213 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10214 {
10215  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10216  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10217  (VMA_DEBUG_MARGIN > 0) &&
10218  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10219 }
10220 
10221 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10222 
10223 VkResult VmaBlockVector::Allocate(
10224  VmaPool hCurrentPool,
10225  uint32_t currentFrameIndex,
10226  VkDeviceSize size,
10227  VkDeviceSize alignment,
10228  const VmaAllocationCreateInfo& createInfo,
10229  VmaSuballocationType suballocType,
10230  VmaAllocation* pAllocation)
10231 {
10232  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10233  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10234  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10235  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10236  const bool canCreateNewBlock =
10237  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10238  (m_Blocks.size() < m_MaxBlockCount);
10239  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10240 
10241  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer,
10242  // which in turn is available only when maxBlockCount = 1.
10243  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10244  {
10245  canMakeOtherLost = false;
10246  }
10247 
10248  // Upper address can only be used with linear allocator and within single memory block.
10249  if(isUpperAddress &&
10250  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10251  {
10252  return VK_ERROR_FEATURE_NOT_PRESENT;
10253  }
10254 
10255  // Validate strategy.
10256  switch(strategy)
10257  {
10258  case 0:
10259  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
10260  break;
10261  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
10262  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
10263  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
10264  break;
10265  default:
10266  return VK_ERROR_FEATURE_NOT_PRESENT;
10267  }
10268 
10269  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
10270  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10271  {
10272  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10273  }
10274 
10275  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10276 
10277  /*
10278  Under certain conditions, this whole section can be skipped for optimization, so
10279  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10280  e.g. for custom pools with linear algorithm.
10281  */
10282  if(!canMakeOtherLost || canCreateNewBlock)
10283  {
10284  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10285  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10286  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
10287 
10288  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10289  {
10290  // Use only last block.
10291  if(!m_Blocks.empty())
10292  {
10293  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10294  VMA_ASSERT(pCurrBlock);
10295  VkResult res = AllocateFromBlock(
10296  pCurrBlock,
10297  hCurrentPool,
10298  currentFrameIndex,
10299  size,
10300  alignment,
10301  allocFlagsCopy,
10302  createInfo.pUserData,
10303  suballocType,
10304  strategy,
10305  pAllocation);
10306  if(res == VK_SUCCESS)
10307  {
10308  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10309  return VK_SUCCESS;
10310  }
10311  }
10312  }
10313  else
10314  {
10315  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10316  {
10317  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10318  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10319  {
10320  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10321  VMA_ASSERT(pCurrBlock);
10322  VkResult res = AllocateFromBlock(
10323  pCurrBlock,
10324  hCurrentPool,
10325  currentFrameIndex,
10326  size,
10327  alignment,
10328  allocFlagsCopy,
10329  createInfo.pUserData,
10330  suballocType,
10331  strategy,
10332  pAllocation);
10333  if(res == VK_SUCCESS)
10334  {
10335  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10336  return VK_SUCCESS;
10337  }
10338  }
10339  }
10340  else // WORST_FIT, FIRST_FIT
10341  {
10342  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10343  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10344  {
10345  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10346  VMA_ASSERT(pCurrBlock);
10347  VkResult res = AllocateFromBlock(
10348  pCurrBlock,
10349  hCurrentPool,
10350  currentFrameIndex,
10351  size,
10352  alignment,
10353  allocFlagsCopy,
10354  createInfo.pUserData,
10355  suballocType,
10356  strategy,
10357  pAllocation);
10358  if(res == VK_SUCCESS)
10359  {
10360  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10361  return VK_SUCCESS;
10362  }
10363  }
10364  }
10365  }
10366 
10367  // 2. Try to create new block.
10368  if(canCreateNewBlock)
10369  {
10370  // Calculate optimal size for new block.
10371  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10372  uint32_t newBlockSizeShift = 0;
10373  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10374 
10375  if(!m_ExplicitBlockSize)
10376  {
10377  // Allocate 1/8, 1/4, 1/2 as first blocks.
10378  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10379  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10380  {
10381  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10382  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10383  {
10384  newBlockSize = smallerNewBlockSize;
10385  ++newBlockSizeShift;
10386  }
10387  else
10388  {
10389  break;
10390  }
10391  }
10392  }
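// Example of the sizing heuristic above (illustrative numbers): with
// m_PreferredBlockSize = 256 MiB, no existing blocks and a 1 MiB request, the loop
// halves the candidate up to NEW_BLOCK_SIZE_SHIFT_MAX = 3 times, so the first block
// is created with 32 MiB. Later blocks grow back toward 256 MiB (64, 128, 256),
// because a halved candidate is only accepted while it exceeds CalcMaxBlockSize().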
10393 
10394  size_t newBlockIndex = 0;
10395  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10396  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10397  if(!m_ExplicitBlockSize)
10398  {
10399  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10400  {
10401  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10402  if(smallerNewBlockSize >= size)
10403  {
10404  newBlockSize = smallerNewBlockSize;
10405  ++newBlockSizeShift;
10406  res = CreateBlock(newBlockSize, &newBlockIndex);
10407  }
10408  else
10409  {
10410  break;
10411  }
10412  }
10413  }
10414 
10415  if(res == VK_SUCCESS)
10416  {
10417  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10418  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10419 
10420  res = AllocateFromBlock(
10421  pBlock,
10422  hCurrentPool,
10423  currentFrameIndex,
10424  size,
10425  alignment,
10426  allocFlagsCopy,
10427  createInfo.pUserData,
10428  suballocType,
10429  strategy,
10430  pAllocation);
10431  if(res == VK_SUCCESS)
10432  {
10433  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10434  return VK_SUCCESS;
10435  }
10436  else
10437  {
10438  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10439  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10440  }
10441  }
10442  }
10443  }
10444 
10445  // 3. Try to allocate from existing blocks with making other allocations lost.
10446  if(canMakeOtherLost)
10447  {
10448  uint32_t tryIndex = 0;
10449  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10450  {
10451  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10452  VmaAllocationRequest bestRequest = {};
10453  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10454 
10455  // 1. Search existing allocations.
10456  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10457  {
10458  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10459  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10460  {
10461  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10462  VMA_ASSERT(pCurrBlock);
10463  VmaAllocationRequest currRequest = {};
10464  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10465  currentFrameIndex,
10466  m_FrameInUseCount,
10467  m_BufferImageGranularity,
10468  size,
10469  alignment,
10470  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10471  suballocType,
10472  canMakeOtherLost,
10473  strategy,
10474  &currRequest))
10475  {
10476  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10477  if(pBestRequestBlock == VMA_NULL ||
10478  currRequestCost < bestRequestCost)
10479  {
10480  pBestRequestBlock = pCurrBlock;
10481  bestRequest = currRequest;
10482  bestRequestCost = currRequestCost;
10483 
10484  if(bestRequestCost == 0)
10485  {
10486  break;
10487  }
10488  }
10489  }
10490  }
10491  }
10492  else // WORST_FIT, FIRST_FIT
10493  {
10494  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10495  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10496  {
10497  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10498  VMA_ASSERT(pCurrBlock);
10499  VmaAllocationRequest currRequest = {};
10500  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10501  currentFrameIndex,
10502  m_FrameInUseCount,
10503  m_BufferImageGranularity,
10504  size,
10505  alignment,
10506  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10507  suballocType,
10508  canMakeOtherLost,
10509  strategy,
10510  &currRequest))
10511  {
10512  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10513  if(pBestRequestBlock == VMA_NULL ||
10514  currRequestCost < bestRequestCost ||
10515  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10516  {
10517  pBestRequestBlock = pCurrBlock;
10518  bestRequest = currRequest;
10519  bestRequestCost = currRequestCost;
10520 
10521  if(bestRequestCost == 0 ||
10522  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10523  {
10524  break;
10525  }
10526  }
10527  }
10528  }
10529  }
10530 
10531  if(pBestRequestBlock != VMA_NULL)
10532  {
10533  if(mapped)
10534  {
10535  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10536  if(res != VK_SUCCESS)
10537  {
10538  return res;
10539  }
10540  }
10541 
10542  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10543  currentFrameIndex,
10544  m_FrameInUseCount,
10545  &bestRequest))
10546  {
10547  // We no longer have an empty Allocation.
10548  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10549  {
10550  m_HasEmptyBlock = false;
10551  }
10552  // Allocate from this pBlock.
10553  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10554  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10555  (*pAllocation)->InitBlockAllocation(
10556  hCurrentPool,
10557  pBestRequestBlock,
10558  bestRequest.offset,
10559  alignment,
10560  size,
10561  suballocType,
10562  mapped,
10563  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10564  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10565  VMA_DEBUG_LOG(" Returned from existing block");
10566  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10567  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10568  {
10569  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10570  }
10571  if(IsCorruptionDetectionEnabled())
10572  {
10573  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10574  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10575  }
10576  return VK_SUCCESS;
10577  }
10578  // else: Some allocations must have been touched while we are here. Next try.
10579  }
10580  else
10581  {
10582  // Could not find place in any of the blocks - break outer loop.
10583  break;
10584  }
10585  }
10586  /* Maximum number of tries exceeded - a very unlikely event that may occur when many
10587  other threads are simultaneously touching allocations, making it impossible to make
10588  them lost at the same time as we try to allocate. */
10589  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10590  {
10591  return VK_ERROR_TOO_MANY_OBJECTS;
10592  }
10593  }
10594 
10595  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10596 }
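// Summary of the allocation flow above: (1) try existing blocks - only the last one
// for the linear algorithm; best fit scans m_Blocks forward and worst/first fit
// backward, since the vector is kept incrementally sorted by free size - then
// (2) create a new VkDeviceMemory block, retrying with halved sizes on failure, and
// (3) as a last resort make other allocations lost, repeating up to
// VMA_ALLOCATION_TRY_COUNT times because concurrent threads may touch the candidate
// allocations between the request and the actual eviction.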
10597 
10598 void VmaBlockVector::Free(
10599  VmaAllocation hAllocation)
10600 {
10601  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10602 
10603  // Scope for lock.
10604  {
10605  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10606 
10607  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10608 
10609  if(IsCorruptionDetectionEnabled())
10610  {
10611  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10612  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10613  }
10614 
10615  if(hAllocation->IsPersistentMap())
10616  {
10617  pBlock->Unmap(m_hAllocator, 1);
10618  }
10619 
10620  pBlock->m_pMetadata->Free(hAllocation);
10621  VMA_HEAVY_ASSERT(pBlock->Validate());
10622 
10623  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
10624 
10625  // pBlock became empty after this deallocation.
10626  if(pBlock->m_pMetadata->IsEmpty())
10627  {
10628  // Already has empty Allocation. We don't want to have two, so delete this one.
10629  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10630  {
10631  pBlockToDelete = pBlock;
10632  Remove(pBlock);
10633  }
10634  // We now have the first empty block.
10635  else
10636  {
10637  m_HasEmptyBlock = true;
10638  }
10639  }
10640  // pBlock didn't become empty, but we have another empty block - find and free that one.
10641  // (This is optional, heuristics.)
10642  else if(m_HasEmptyBlock)
10643  {
10644  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10645  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10646  {
10647  pBlockToDelete = pLastBlock;
10648  m_Blocks.pop_back();
10649  m_HasEmptyBlock = false;
10650  }
10651  }
10652 
10653  IncrementallySortBlocks();
10654  }
10655 
10656  // Destruction of a free Allocation. Deferred until this point, outside of mutex
10657  // lock, for performance reasons.
10658  if(pBlockToDelete != VMA_NULL)
10659  {
10660  VMA_DEBUG_LOG(" Deleted empty allocation");
10661  pBlockToDelete->Destroy(m_hAllocator);
10662  vma_delete(m_hAllocator, pBlockToDelete);
10663  }
10664 }
10665 
10666 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10667 {
10668  VkDeviceSize result = 0;
10669  for(size_t i = m_Blocks.size(); i--; )
10670  {
10671  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10672  if(result >= m_PreferredBlockSize)
10673  {
10674  break;
10675  }
10676  }
10677  return result;
10678 }
10679 
10680 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10681 {
10682  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10683  {
10684  if(m_Blocks[blockIndex] == pBlock)
10685  {
10686  VmaVectorRemove(m_Blocks, blockIndex);
10687  return;
10688  }
10689  }
10690  VMA_ASSERT(0);
10691 }
10692 
10693 void VmaBlockVector::IncrementallySortBlocks()
10694 {
10695  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10696  {
10697  // Bubble sort only until first swap.
10698  for(size_t i = 1; i < m_Blocks.size(); ++i)
10699  {
10700  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10701  {
10702  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10703  return;
10704  }
10705  }
10706  }
10707 }
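// The single bubble-sort pass above, stopping at the first swap, is a cheap
// heuristic: each allocation or free changes the free size of only one block, so
// repeated calls keep m_Blocks approximately sorted by ascending free size while
// costing at most O(n) and at most one swap per call.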
10708 
10709 VkResult VmaBlockVector::AllocateFromBlock(
10710  VmaDeviceMemoryBlock* pBlock,
10711  VmaPool hCurrentPool,
10712  uint32_t currentFrameIndex,
10713  VkDeviceSize size,
10714  VkDeviceSize alignment,
10715  VmaAllocationCreateFlags allocFlags,
10716  void* pUserData,
10717  VmaSuballocationType suballocType,
10718  uint32_t strategy,
10719  VmaAllocation* pAllocation)
10720 {
10721  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
10722  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10723  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10724  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10725 
10726  VmaAllocationRequest currRequest = {};
10727  if(pBlock->m_pMetadata->CreateAllocationRequest(
10728  currentFrameIndex,
10729  m_FrameInUseCount,
10730  m_BufferImageGranularity,
10731  size,
10732  alignment,
10733  isUpperAddress,
10734  suballocType,
10735  false, // canMakeOtherLost
10736  strategy,
10737  &currRequest))
10738  {
10739  // Allocate from pCurrBlock.
10740  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
10741 
10742  if(mapped)
10743  {
10744  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
10745  if(res != VK_SUCCESS)
10746  {
10747  return res;
10748  }
10749  }
10750 
10751  // We no longer have an empty Allocation.
10752  if(pBlock->m_pMetadata->IsEmpty())
10753  {
10754  m_HasEmptyBlock = false;
10755  }
10756 
10757  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10758  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
10759  (*pAllocation)->InitBlockAllocation(
10760  hCurrentPool,
10761  pBlock,
10762  currRequest.offset,
10763  alignment,
10764  size,
10765  suballocType,
10766  mapped,
10767  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10768  VMA_HEAVY_ASSERT(pBlock->Validate());
10769  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
10770  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10771  {
10772  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10773  }
10774  if(IsCorruptionDetectionEnabled())
10775  {
10776  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
10777  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10778  }
10779  return VK_SUCCESS;
10780  }
10781  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10782 }
10783 
10784 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10785 {
10786  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10787  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10788  allocInfo.allocationSize = blockSize;
10789  VkDeviceMemory mem = VK_NULL_HANDLE;
10790  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10791  if(res < 0)
10792  {
10793  return res;
10794  }
10795 
10796  // New VkDeviceMemory successfully created.
10797 
10798  // Create new Allocation for it.
10799  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10800  pBlock->Init(
10801  m_hAllocator,
10802  m_MemoryTypeIndex,
10803  mem,
10804  allocInfo.allocationSize,
10805  m_NextBlockId++,
10806  m_Algorithm);
10807 
10808  m_Blocks.push_back(pBlock);
10809  if(pNewBlockIndex != VMA_NULL)
10810  {
10811  *pNewBlockIndex = m_Blocks.size() - 1;
10812  }
10813 
10814  return VK_SUCCESS;
10815 }
10816 
10817 #if VMA_STATS_STRING_ENABLED
10818 
10819 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
10820 {
10821  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10822 
10823  json.BeginObject();
10824 
10825  if(m_IsCustomPool)
10826  {
10827  json.WriteString("MemoryTypeIndex");
10828  json.WriteNumber(m_MemoryTypeIndex);
10829 
10830  json.WriteString("BlockSize");
10831  json.WriteNumber(m_PreferredBlockSize);
10832 
10833  json.WriteString("BlockCount");
10834  json.BeginObject(true);
10835  if(m_MinBlockCount > 0)
10836  {
10837  json.WriteString("Min");
10838  json.WriteNumber((uint64_t)m_MinBlockCount);
10839  }
10840  if(m_MaxBlockCount < SIZE_MAX)
10841  {
10842  json.WriteString("Max");
10843  json.WriteNumber((uint64_t)m_MaxBlockCount);
10844  }
10845  json.WriteString("Cur");
10846  json.WriteNumber((uint64_t)m_Blocks.size());
10847  json.EndObject();
10848 
10849  if(m_FrameInUseCount > 0)
10850  {
10851  json.WriteString("FrameInUseCount");
10852  json.WriteNumber(m_FrameInUseCount);
10853  }
10854 
10855  if(m_Algorithm != 0)
10856  {
10857  json.WriteString("Algorithm");
10858  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
10859  }
10860  }
10861  else
10862  {
10863  json.WriteString("PreferredBlockSize");
10864  json.WriteNumber(m_PreferredBlockSize);
10865  }
10866 
10867  json.WriteString("Blocks");
10868  json.BeginObject();
10869  for(size_t i = 0; i < m_Blocks.size(); ++i)
10870  {
10871  json.BeginString();
10872  json.ContinueString(m_Blocks[i]->GetId());
10873  json.EndString();
10874 
10875  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
10876  }
10877  json.EndObject();
10878 
10879  json.EndObject();
10880 }
10881 
10882 #endif // #if VMA_STATS_STRING_ENABLED
10883 
10884 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
10885  VmaAllocator hAllocator,
10886  uint32_t currentFrameIndex)
10887 {
10888  if(m_pDefragmentator == VMA_NULL)
10889  {
10890  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
10891  hAllocator,
10892  this,
10893  currentFrameIndex);
10894  }
10895 
10896  return m_pDefragmentator;
10897 }
10898 
10899 VkResult VmaBlockVector::Defragment(
10900  VmaDefragmentationStats* pDefragmentationStats,
10901  VkDeviceSize& maxBytesToMove,
10902  uint32_t& maxAllocationsToMove)
10903 {
10904  if(m_pDefragmentator == VMA_NULL)
10905  {
10906  return VK_SUCCESS;
10907  }
10908 
10909  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10910 
10911  // Defragment.
10912  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
10913 
10914  // Accumulate statistics.
10915  if(pDefragmentationStats != VMA_NULL)
10916  {
10917  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
10918  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
10919  pDefragmentationStats->bytesMoved += bytesMoved;
10920  pDefragmentationStats->allocationsMoved += allocationsMoved;
10921  VMA_ASSERT(bytesMoved <= maxBytesToMove);
10922  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
10923  maxBytesToMove -= bytesMoved;
10924  maxAllocationsToMove -= allocationsMoved;
10925  }
10926 
10927  // Free empty blocks.
10928  m_HasEmptyBlock = false;
10929  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10930  {
10931  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
10932  if(pBlock->m_pMetadata->IsEmpty())
10933  {
10934  if(m_Blocks.size() > m_MinBlockCount)
10935  {
10936  if(pDefragmentationStats != VMA_NULL)
10937  {
10938  ++pDefragmentationStats->deviceMemoryBlocksFreed;
10939  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
10940  }
10941 
10942  VmaVectorRemove(m_Blocks, blockIndex);
10943  pBlock->Destroy(m_hAllocator);
10944  vma_delete(m_hAllocator, pBlock);
10945  }
10946  else
10947  {
10948  m_HasEmptyBlock = true;
10949  }
10950  }
10951  }
10952 
10953  return result;
10954 }
10955 
10956 void VmaBlockVector::DestroyDefragmentator()
10957 {
10958  if(m_pDefragmentator != VMA_NULL)
10959  {
10960  vma_delete(m_hAllocator, m_pDefragmentator);
10961  m_pDefragmentator = VMA_NULL;
10962  }
10963 }
10964 
10965 void VmaBlockVector::MakePoolAllocationsLost(
10966  uint32_t currentFrameIndex,
10967  size_t* pLostAllocationCount)
10968 {
10969  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10970  size_t lostAllocationCount = 0;
10971  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10972  {
10973  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10974  VMA_ASSERT(pBlock);
10975  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
10976  }
10977  if(pLostAllocationCount != VMA_NULL)
10978  {
10979  *pLostAllocationCount = lostAllocationCount;
10980  }
10981 }
10982 
10983 VkResult VmaBlockVector::CheckCorruption()
10984 {
10985  if(!IsCorruptionDetectionEnabled())
10986  {
10987  return VK_ERROR_FEATURE_NOT_PRESENT;
10988  }
10989 
10990  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10991  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10992  {
10993  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10994  VMA_ASSERT(pBlock);
10995  VkResult res = pBlock->CheckCorruption(m_hAllocator);
10996  if(res != VK_SUCCESS)
10997  {
10998  return res;
10999  }
11000  }
11001  return VK_SUCCESS;
11002 }
11003 
11004 void VmaBlockVector::AddStats(VmaStats* pStats)
11005 {
11006  const uint32_t memTypeIndex = m_MemoryTypeIndex;
11007  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11008 
11009  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11010 
11011  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11012  {
11013  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11014  VMA_ASSERT(pBlock);
11015  VMA_HEAVY_ASSERT(pBlock->Validate());
11016  VmaStatInfo allocationStatInfo;
11017  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11018  VmaAddStatInfo(pStats->total, allocationStatInfo);
11019  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11020  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11021  }
11022 }
11023 
11024 ////////////////////////////////////////////////////////////////////////////////
11025 // VmaDefragmentator members definition
11026 
11027 VmaDefragmentator::VmaDefragmentator(
11028  VmaAllocator hAllocator,
11029  VmaBlockVector* pBlockVector,
11030  uint32_t currentFrameIndex) :
11031  m_hAllocator(hAllocator),
11032  m_pBlockVector(pBlockVector),
11033  m_CurrentFrameIndex(currentFrameIndex),
11034  m_BytesMoved(0),
11035  m_AllocationsMoved(0),
11036  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
11037  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
11038 {
11039  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
11040 }
11041 
11042 VmaDefragmentator::~VmaDefragmentator()
11043 {
11044  for(size_t i = m_Blocks.size(); i--; )
11045  {
11046  vma_delete(m_hAllocator, m_Blocks[i]);
11047  }
11048 }
11049 
11050 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11051 {
11052  AllocationInfo allocInfo;
11053  allocInfo.m_hAllocation = hAlloc;
11054  allocInfo.m_pChanged = pChanged;
11055  m_Allocations.push_back(allocInfo);
11056 }
11057 
11058 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11059 {
11060  // It has already been mapped for defragmentation.
11061  if(m_pMappedDataForDefragmentation)
11062  {
11063  *ppMappedData = m_pMappedDataForDefragmentation;
11064  return VK_SUCCESS;
11065  }
11066 
11067  // It is originally mapped.
11068  if(m_pBlock->GetMappedData())
11069  {
11070  *ppMappedData = m_pBlock->GetMappedData();
11071  return VK_SUCCESS;
11072  }
11073 
11074  // Map on first usage.
11075  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11076  *ppMappedData = m_pMappedDataForDefragmentation;
11077  return res;
11078 }
11079 
11080 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11081 {
11082  if(m_pMappedDataForDefragmentation != VMA_NULL)
11083  {
11084  m_pBlock->Unmap(hAllocator, 1);
11085  }
11086 }
11087 
11088 VkResult VmaDefragmentator::DefragmentRound(
11089  VkDeviceSize maxBytesToMove,
11090  uint32_t maxAllocationsToMove)
11091 {
11092  if(m_Blocks.empty())
11093  {
11094  return VK_SUCCESS;
11095  }
11096 
11097  size_t srcBlockIndex = m_Blocks.size() - 1;
11098  size_t srcAllocIndex = SIZE_MAX;
11099  for(;;)
11100  {
11101  // 1. Find next allocation to move.
11102  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11103  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11104  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11105  {
11106  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11107  {
11108  // Finished: no more allocations to process.
11109  if(srcBlockIndex == 0)
11110  {
11111  return VK_SUCCESS;
11112  }
11113  else
11114  {
11115  --srcBlockIndex;
11116  srcAllocIndex = SIZE_MAX;
11117  }
11118  }
11119  else
11120  {
11121  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11122  }
11123  }
11124 
11125  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11126  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11127 
11128  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11129  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11130  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11131  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11132 
11133  // 2. Try to find new place for this allocation in preceding or current block.
11134  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11135  {
11136  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11137  VmaAllocationRequest dstAllocRequest;
11138  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11139  m_CurrentFrameIndex,
11140  m_pBlockVector->GetFrameInUseCount(),
11141  m_pBlockVector->GetBufferImageGranularity(),
11142  size,
11143  alignment,
11144  false, // upperAddress
11145  suballocType,
11146  false, // canMakeOtherLost
11147  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
11148  &dstAllocRequest) &&
11149  MoveMakesSense(
11150  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11151  {
11152  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11153 
11154  // Reached limit on number of allocations or bytes to move.
11155  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11156  (m_BytesMoved + size > maxBytesToMove))
11157  {
11158  return VK_INCOMPLETE;
11159  }
11160 
11161  void* pDstMappedData = VMA_NULL;
11162  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11163  if(res != VK_SUCCESS)
11164  {
11165  return res;
11166  }
11167 
11168  void* pSrcMappedData = VMA_NULL;
11169  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11170  if(res != VK_SUCCESS)
11171  {
11172  return res;
11173  }
11174 
11175  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11176  memcpy(
11177  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11178  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11179  static_cast<size_t>(size));
11180 
11181  if(VMA_DEBUG_MARGIN > 0)
11182  {
11183  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11184  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11185  }
11186 
11187  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11188  dstAllocRequest,
11189  suballocType,
11190  size,
11191  false, // upperAddress
11192  allocInfo.m_hAllocation);
11193  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11194 
11195  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11196 
11197  if(allocInfo.m_pChanged != VMA_NULL)
11198  {
11199  *allocInfo.m_pChanged = VK_TRUE;
11200  }
11201 
11202  ++m_AllocationsMoved;
11203  m_BytesMoved += size;
11204 
11205  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11206 
11207  break;
11208  }
11209  }
11210 
11211  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for the next round.
11212 
11213  if(srcAllocIndex > 0)
11214  {
11215  --srcAllocIndex;
11216  }
11217  else
11218  {
11219  if(srcBlockIndex > 0)
11220  {
11221  --srcBlockIndex;
11222  srcAllocIndex = SIZE_MAX;
11223  }
11224  else
11225  {
11226  return VK_SUCCESS;
11227  }
11228  }
11229  }
11230 }
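// The round above walks allocations from the most "source" block (last in m_Blocks)
// toward the most "destination" block (first), and for each allocation looks for a
// placement strictly earlier in (block, offset) order - see MoveMakesSense - copying
// the bytes through the two mapped pointers. VK_INCOMPLETE means the
// maxBytesToMove / maxAllocationsToMove budget ran out before the round finished.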
11231 
11232 VkResult VmaDefragmentator::Defragment(
11233  VkDeviceSize maxBytesToMove,
11234  uint32_t maxAllocationsToMove)
11235 {
11236  if(m_Allocations.empty())
11237  {
11238  return VK_SUCCESS;
11239  }
11240 
11241  // Create block info for each block.
11242  const size_t blockCount = m_pBlockVector->m_Blocks.size();
11243  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11244  {
11245  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
11246  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
11247  m_Blocks.push_back(pBlockInfo);
11248  }
11249 
11250  // Sort them by m_pBlock pointer value.
11251  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
11252 
11253  // Move allocation infos from m_Allocations to the m_Allocations of the appropriate m_Blocks entry.
11254  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
11255  {
11256  AllocationInfo& allocInfo = m_Allocations[blockIndex];
11257  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
11258  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11259  {
11260  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
11261  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
11262  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
11263  {
11264  (*it)->m_Allocations.push_back(allocInfo);
11265  }
11266  else
11267  {
11268  VMA_ASSERT(0);
11269  }
11270  }
11271  }
11272  m_Allocations.clear();
11273 
11274  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11275  {
11276  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
11277  pBlockInfo->CalcHasNonMovableAllocations();
11278  pBlockInfo->SortAllocationsBySizeDescecnding();
11279  }
11280 
11281  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
11282  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
11283 
11284  // Execute defragmentation rounds (the main part).
11285  VkResult result = VK_SUCCESS;
11286  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
11287  {
11288  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
11289  }
11290 
11291  // Unmap blocks that were mapped for defragmentation.
11292  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11293  {
11294  m_Blocks[blockIndex]->Unmap(m_hAllocator);
11295  }
11296 
11297  return result;
11298 }
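// Overall pipeline of Defragment: build a BlockInfo per block, bucket the
// registered allocations into their owning blocks (binary search over block
// pointers), sort blocks from most "destination" to most "source" and each block's
// allocations by descending size, run up to two rounds of DefragmentRound within
// the given budgets, and finally unmap any blocks that were mapped only for the
// copies.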
11299 
11300 bool VmaDefragmentator::MoveMakesSense(
11301  size_t dstBlockIndex, VkDeviceSize dstOffset,
11302  size_t srcBlockIndex, VkDeviceSize srcOffset)
11303 {
11304  if(dstBlockIndex < srcBlockIndex)
11305  {
11306  return true;
11307  }
11308  if(dstBlockIndex > srcBlockIndex)
11309  {
11310  return false;
11311  }
11312  if(dstOffset < srcOffset)
11313  {
11314  return true;
11315  }
11316  return false;
11317 }
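// Illustrative note (not part of the library): MoveMakesSense() orders
// allocations lexicographically by (blockIndex, offset), so a move only
// "makes sense" if it compacts data toward the front of the block vector.
// For example:
//
//   MoveMakesSense(/*dst*/ 0, 0,  /*src*/ 1, 0);   // true:  earlier block
//   MoveMakesSense(/*dst*/ 1, 0,  /*src*/ 0, 0);   // false: later block
//   MoveMakesSense(/*dst*/ 0, 64, /*src*/ 0, 256); // true:  same block, lower offset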
11318 
11319 ////////////////////////////////////////////////////////////////////////////////
11320 // VmaRecorder
11321 
11322 #if VMA_RECORDING_ENABLED
11323 
11324 VmaRecorder::VmaRecorder() :
11325  m_UseMutex(true),
11326  m_Flags(0),
11327  m_File(VMA_NULL),
11328  m_Freq(INT64_MAX),
11329  m_StartCounter(INT64_MAX)
11330 {
11331 }
11332 
11333 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11334 {
11335  m_UseMutex = useMutex;
11336  m_Flags = settings.flags;
11337 
11338  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11339  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11340 
11341  // Open file for writing.
11342  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11343  if(err != 0)
11344  {
11345  return VK_ERROR_INITIALIZATION_FAILED;
11346  }
11347 
11348  // Write header.
11349  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11350  fprintf(m_File, "%s\n", "1,3");
11351 
11352  return VK_SUCCESS;
11353 }
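// For illustration (numeric values hypothetical), a freshly opened recording
// file starts with the two header lines written above, followed by the
// configuration block and one CSV line per recorded call:
//
//   Vulkan Memory Allocator,Calls recording
//   1,3
//   Config,Begin
//   ...                      (see WriteConfiguration() below)
//   Config,End
//   7204,0.002,0,vmaCreateAllocator
//   7204,0.153,0,vmaCreateBuffer,...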
11354 
11355 VmaRecorder::~VmaRecorder()
11356 {
11357  if(m_File != VMA_NULL)
11358  {
11359  fclose(m_File);
11360  }
11361 }
11362 
11363 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11364 {
11365  CallParams callParams;
11366  GetBasicParams(callParams);
11367 
11368  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11369  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11370  Flush();
11371 }
11372 
11373 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11374 {
11375  CallParams callParams;
11376  GetBasicParams(callParams);
11377 
11378  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11379  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11380  Flush();
11381 }
11382 
11383 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
11384 {
11385  CallParams callParams;
11386  GetBasicParams(callParams);
11387 
11388  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11389  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
11390  createInfo.memoryTypeIndex,
11391  createInfo.flags,
11392  createInfo.blockSize,
11393  (uint64_t)createInfo.minBlockCount,
11394  (uint64_t)createInfo.maxBlockCount,
11395  createInfo.frameInUseCount,
11396  pool);
11397  Flush();
11398 }
11399 
11400 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11401 {
11402  CallParams callParams;
11403  GetBasicParams(callParams);
11404 
11405  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11406  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11407  pool);
11408  Flush();
11409 }
11410 
11411 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11412  const VkMemoryRequirements& vkMemReq,
11413  const VmaAllocationCreateInfo& createInfo,
11414  VmaAllocation allocation)
11415 {
11416  CallParams callParams;
11417  GetBasicParams(callParams);
11418 
11419  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11420  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11421  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11422  vkMemReq.size,
11423  vkMemReq.alignment,
11424  vkMemReq.memoryTypeBits,
11425  createInfo.flags,
11426  createInfo.usage,
11427  createInfo.requiredFlags,
11428  createInfo.preferredFlags,
11429  createInfo.memoryTypeBits,
11430  createInfo.pool,
11431  allocation,
11432  userDataStr.GetString());
11433  Flush();
11434 }
11435 
11436 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11437  const VkMemoryRequirements& vkMemReq,
11438  bool requiresDedicatedAllocation,
11439  bool prefersDedicatedAllocation,
11440  const VmaAllocationCreateInfo& createInfo,
11441  VmaAllocation allocation)
11442 {
11443  CallParams callParams;
11444  GetBasicParams(callParams);
11445 
11446  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11447  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11448  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11449  vkMemReq.size,
11450  vkMemReq.alignment,
11451  vkMemReq.memoryTypeBits,
11452  requiresDedicatedAllocation ? 1 : 0,
11453  prefersDedicatedAllocation ? 1 : 0,
11454  createInfo.flags,
11455  createInfo.usage,
11456  createInfo.requiredFlags,
11457  createInfo.preferredFlags,
11458  createInfo.memoryTypeBits,
11459  createInfo.pool,
11460  allocation,
11461  userDataStr.GetString());
11462  Flush();
11463 }
11464 
11465 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11466  const VkMemoryRequirements& vkMemReq,
11467  bool requiresDedicatedAllocation,
11468  bool prefersDedicatedAllocation,
11469  const VmaAllocationCreateInfo& createInfo,
11470  VmaAllocation allocation)
11471 {
11472  CallParams callParams;
11473  GetBasicParams(callParams);
11474 
11475  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11476  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11477  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11478  vkMemReq.size,
11479  vkMemReq.alignment,
11480  vkMemReq.memoryTypeBits,
11481  requiresDedicatedAllocation ? 1 : 0,
11482  prefersDedicatedAllocation ? 1 : 0,
11483  createInfo.flags,
11484  createInfo.usage,
11485  createInfo.requiredFlags,
11486  createInfo.preferredFlags,
11487  createInfo.memoryTypeBits,
11488  createInfo.pool,
11489  allocation,
11490  userDataStr.GetString());
11491  Flush();
11492 }
11493 
11494 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11495  VmaAllocation allocation)
11496 {
11497  CallParams callParams;
11498  GetBasicParams(callParams);
11499 
11500  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11501  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11502  allocation);
11503  Flush();
11504 }
11505 
11506 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11507  VmaAllocation allocation,
11508  const void* pUserData)
11509 {
11510  CallParams callParams;
11511  GetBasicParams(callParams);
11512 
11513  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11514  UserDataString userDataStr(
11515  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11516  pUserData);
11517  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11518  allocation,
11519  userDataStr.GetString());
11520  Flush();
11521 }
11522 
11523 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11524  VmaAllocation allocation)
11525 {
11526  CallParams callParams;
11527  GetBasicParams(callParams);
11528 
11529  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11530  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11531  allocation);
11532  Flush();
11533 }
11534 
11535 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11536  VmaAllocation allocation)
11537 {
11538  CallParams callParams;
11539  GetBasicParams(callParams);
11540 
11541  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11542  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11543  allocation);
11544  Flush();
11545 }
11546 
11547 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11548  VmaAllocation allocation)
11549 {
11550  CallParams callParams;
11551  GetBasicParams(callParams);
11552 
11553  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11554  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11555  allocation);
11556  Flush();
11557 }
11558 
11559 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11560  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11561 {
11562  CallParams callParams;
11563  GetBasicParams(callParams);
11564 
11565  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11566  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11567  allocation,
11568  offset,
11569  size);
11570  Flush();
11571 }
11572 
11573 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11574  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11575 {
11576  CallParams callParams;
11577  GetBasicParams(callParams);
11578 
11579  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11580  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11581  allocation,
11582  offset,
11583  size);
11584  Flush();
11585 }
11586 
11587 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11588  const VkBufferCreateInfo& bufCreateInfo,
11589  const VmaAllocationCreateInfo& allocCreateInfo,
11590  VmaAllocation allocation)
11591 {
11592  CallParams callParams;
11593  GetBasicParams(callParams);
11594 
11595  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11596  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11597  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11598  bufCreateInfo.flags,
11599  bufCreateInfo.size,
11600  bufCreateInfo.usage,
11601  bufCreateInfo.sharingMode,
11602  allocCreateInfo.flags,
11603  allocCreateInfo.usage,
11604  allocCreateInfo.requiredFlags,
11605  allocCreateInfo.preferredFlags,
11606  allocCreateInfo.memoryTypeBits,
11607  allocCreateInfo.pool,
11608  allocation,
11609  userDataStr.GetString());
11610  Flush();
11611 }
11612 
11613 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11614  const VkImageCreateInfo& imageCreateInfo,
11615  const VmaAllocationCreateInfo& allocCreateInfo,
11616  VmaAllocation allocation)
11617 {
11618  CallParams callParams;
11619  GetBasicParams(callParams);
11620 
11621  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11622  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11623  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11624  imageCreateInfo.flags,
11625  imageCreateInfo.imageType,
11626  imageCreateInfo.format,
11627  imageCreateInfo.extent.width,
11628  imageCreateInfo.extent.height,
11629  imageCreateInfo.extent.depth,
11630  imageCreateInfo.mipLevels,
11631  imageCreateInfo.arrayLayers,
11632  imageCreateInfo.samples,
11633  imageCreateInfo.tiling,
11634  imageCreateInfo.usage,
11635  imageCreateInfo.sharingMode,
11636  imageCreateInfo.initialLayout,
11637  allocCreateInfo.flags,
11638  allocCreateInfo.usage,
11639  allocCreateInfo.requiredFlags,
11640  allocCreateInfo.preferredFlags,
11641  allocCreateInfo.memoryTypeBits,
11642  allocCreateInfo.pool,
11643  allocation,
11644  userDataStr.GetString());
11645  Flush();
11646 }
11647 
11648 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11649  VmaAllocation allocation)
11650 {
11651  CallParams callParams;
11652  GetBasicParams(callParams);
11653 
11654  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11655  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11656  allocation);
11657  Flush();
11658 }
11659 
11660 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11661  VmaAllocation allocation)
11662 {
11663  CallParams callParams;
11664  GetBasicParams(callParams);
11665 
11666  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11667  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11668  allocation);
11669  Flush();
11670 }
11671 
11672 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11673  VmaAllocation allocation)
11674 {
11675  CallParams callParams;
11676  GetBasicParams(callParams);
11677 
11678  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11679  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11680  allocation);
11681  Flush();
11682 }
11683 
11684 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11685  VmaAllocation allocation)
11686 {
11687  CallParams callParams;
11688  GetBasicParams(callParams);
11689 
11690  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11691  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11692  allocation);
11693  Flush();
11694 }
11695 
11696 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11697  VmaPool pool)
11698 {
11699  CallParams callParams;
11700  GetBasicParams(callParams);
11701 
11702  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11703  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11704  pool);
11705  Flush();
11706 }
11707 
11708 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11709 {
11710  if(pUserData != VMA_NULL)
11711  {
11712  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11713  {
11714  m_Str = (const char*)pUserData;
11715  }
11716  else
11717  {
11718  sprintf_s(m_PtrStr, "%p", pUserData);
11719  m_Str = m_PtrStr;
11720  }
11721  }
11722  else
11723  {
11724  m_Str = "";
11725  }
11726 }
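// Illustrative example (names hypothetical): with
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set, pUserData is assumed to
// be a null-terminated string and is recorded verbatim, e.g. "MyTexture".
// Without the flag, only the pointer value is recorded, e.g.
// "000001D1E7A52C40". A null pUserData is recorded as an empty field.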
11727 
11728 void VmaRecorder::WriteConfiguration(
11729  const VkPhysicalDeviceProperties& devProps,
11730  const VkPhysicalDeviceMemoryProperties& memProps,
11731  bool dedicatedAllocationExtensionEnabled)
11732 {
11733  fprintf(m_File, "Config,Begin\n");
11734 
11735  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
11736  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
11737  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
11738  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
11739  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
11740  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
11741 
11742  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
11743  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
11744  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
11745 
11746  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
11747  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
11748  {
11749  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
11750  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
11751  }
11752  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
11753  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
11754  {
11755  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
11756  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
11757  }
11758 
11759  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
11760 
11761  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
11762  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
11763  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
11764  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
11765  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
11766  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
11767  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
11768  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
11769  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11770 
11771  fprintf(m_File, "Config,End\n");
11772 }
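// For illustration, a configuration block written by this function might look
// like this (values hypothetical):
//
//   Config,Begin
//   PhysicalDevice,apiVersion,4198477
//   PhysicalDevice,deviceName,SomeDevice
//   PhysicalDeviceLimits,bufferImageGranularity,1024
//   PhysicalDeviceMemory,HeapCount,2
//   PhysicalDeviceMemory,Heap,0,size,8589934592
//   PhysicalDeviceMemory,Heap,0,flags,1
//   Extension,VK_KHR_dedicated_allocation,1
//   Macro,VMA_DEBUG_MARGIN,0
//   Config,End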
11773 
11774 void VmaRecorder::GetBasicParams(CallParams& outParams)
11775 {
11776  outParams.threadId = GetCurrentThreadId();
11777 
11778  LARGE_INTEGER counter;
11779  QueryPerformanceCounter(&counter);
11780  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11781 }
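// Worked example of the timestamp math above (hypothetical numbers): with
// m_Freq == 10,000,000 ticks/s and counter.QuadPart - m_StartCounter ==
// 1,500,000 ticks, outParams.time == 0.15 s, which the "%.3f" format in the
// Record* functions prints as "0.150".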
11782 
11783 void VmaRecorder::Flush()
11784 {
11785  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11786  {
11787  fflush(m_File);
11788  }
11789 }
11790 
11791 #endif // #if VMA_RECORDING_ENABLED
11792 
11793 ////////////////////////////////////////////////////////////////////////////////
11794 // VmaAllocator_T
11795 
11796 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
11797  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
11798  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
11799  m_hDevice(pCreateInfo->device),
11800  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
11801  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
11802  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
11803  m_PreferredLargeHeapBlockSize(0),
11804  m_PhysicalDevice(pCreateInfo->physicalDevice),
11805  m_CurrentFrameIndex(0),
11806  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
11807  m_NextPoolId(0)
11808 #if VMA_RECORDING_ENABLED
11809  ,m_pRecorder(VMA_NULL)
11810 #endif
11811 {
11812  if(VMA_DEBUG_DETECT_CORRUPTION)
11813  {
11814  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
11815  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
11816  }
11817 
11818  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
11819 
11820 #if !(VMA_DEDICATED_ALLOCATION)
11821  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
11822  {
11823  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
11824  }
11825 #endif
11826 
11827  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
11828  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
11829  memset(&m_MemProps, 0, sizeof(m_MemProps));
11830 
11831  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
11832  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
11833 
11834  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
11835  {
11836  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
11837  }
11838 
11839  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
11840  {
11841  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
11842  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
11843  }
11844 
11845  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
11846 
11847  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
11848  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
11849 
11850  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
11851  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
11852  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
11853  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
11854 
11855  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
11856  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11857 
11858  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
11859  {
11860  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
11861  {
11862  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
11863  if(limit != VK_WHOLE_SIZE)
11864  {
11865  m_HeapSizeLimit[heapIndex] = limit;
11866  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
11867  {
11868  m_MemProps.memoryHeaps[heapIndex].size = limit;
11869  }
11870  }
11871  }
11872  }
11873 
11874  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11875  {
11876  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
11877 
11878  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
11879  this,
11880  memTypeIndex,
11881  preferredBlockSize,
11882  0,
11883  SIZE_MAX,
11884  GetBufferImageGranularity(),
11885  pCreateInfo->frameInUseCount,
11886  false, // isCustomPool
11887  false, // explicitBlockSize
11888  false); // linearAlgorithm
11889  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
11890  // because minBlockCount is 0.
11891  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
11892 
11893  }
11894 }
11895 
11896 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
11897 {
11898  VkResult res = VK_SUCCESS;
11899 
11900  if(pCreateInfo->pRecordSettings != VMA_NULL &&
11901  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
11902  {
11903 #if VMA_RECORDING_ENABLED
11904  m_pRecorder = vma_new(this, VmaRecorder)();
11905  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
11906  if(res != VK_SUCCESS)
11907  {
11908  return res;
11909  }
11910  m_pRecorder->WriteConfiguration(
11911  m_PhysicalDeviceProperties,
11912  m_MemProps,
11913  m_UseKhrDedicatedAllocation);
11914  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
11915 #else
11916  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
11917  return VK_ERROR_FEATURE_NOT_PRESENT;
11918 #endif
11919  }
11920 
11921  return res;
11922 }
11923 
11924 VmaAllocator_T::~VmaAllocator_T()
11925 {
11926 #if VMA_RECORDING_ENABLED
11927  if(m_pRecorder != VMA_NULL)
11928  {
11929  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
11930  vma_delete(this, m_pRecorder);
11931  }
11932 #endif
11933 
11934  VMA_ASSERT(m_Pools.empty());
11935 
11936  for(size_t i = GetMemoryTypeCount(); i--; )
11937  {
11938  vma_delete(this, m_pDedicatedAllocations[i]);
11939  vma_delete(this, m_pBlockVectors[i]);
11940  }
11941 }
11942 
11943 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
11944 {
11945 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
11946  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
11947  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
11948  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
11949  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
11950  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
11951  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
11952  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
11953  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
11954  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
11955  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
11956  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
11957  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
11958  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
11959  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
11960  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
11961  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
11962 #if VMA_DEDICATED_ALLOCATION
11963  if(m_UseKhrDedicatedAllocation)
11964  {
11965  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
11966  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
11967  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
11968  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
11969  }
11970 #endif // #if VMA_DEDICATED_ALLOCATION
11971 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
11972 
11973 #define VMA_COPY_IF_NOT_NULL(funcName) \
11974  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
11975 
11976  if(pVulkanFunctions != VMA_NULL)
11977  {
11978  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
11979  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
11980  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
11981  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
11982  VMA_COPY_IF_NOT_NULL(vkMapMemory);
11983  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
11984  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
11985  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
11986  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
11987  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
11988  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
11989  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
11990  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
11991  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
11992  VMA_COPY_IF_NOT_NULL(vkCreateImage);
11993  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
11994 #if VMA_DEDICATED_ALLOCATION
11995  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
11996  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
11997 #endif
11998  }
11999 
12000 #undef VMA_COPY_IF_NOT_NULL
12001 
12002  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
12003  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
12004  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
12005  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
12006  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
12007  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
12008  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
12009  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
12010  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
12011  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
12012  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
12013  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
12014  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
12015  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
12016  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
12017  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
12018  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
12019  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
12020 #if VMA_DEDICATED_ALLOCATION
12021  if(m_UseKhrDedicatedAllocation)
12022  {
12023  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
12024  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
12025  }
12026 #endif
12027 }
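// Usage sketch (illustrative, not part of the library): when compiling with
// VMA_STATIC_VULKAN_FUNCTIONS defined to 0, the application must provide all
// of the pointers asserted above, e.g.:
//
//   VmaVulkanFunctions vulkanFunctions = {};
//   vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
//   vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
//   // ... fill every remaining member, e.g. from vkGetInstanceProcAddr /
//   // vkGetDeviceProcAddr ...
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.pVulkanFunctions = &vulkanFunctions;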
12028 
12029 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12030 {
12031  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12032  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12033  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12034  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12035 }
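// Worked example (assuming the default macro values, VMA_SMALL_HEAP_MAX_SIZE =
// 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB): a 256 MiB heap
// counts as small, so its preferred block size is 256 MiB / 8 = 32 MiB; an
// 8 GiB heap is large, so blocks default to 256 MiB, or to
// VmaAllocatorCreateInfo::preferredLargeHeapBlockSize if the user set one.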
12036 
12037 VkResult VmaAllocator_T::AllocateMemoryOfType(
12038  VkDeviceSize size,
12039  VkDeviceSize alignment,
12040  bool dedicatedAllocation,
12041  VkBuffer dedicatedBuffer,
12042  VkImage dedicatedImage,
12043  const VmaAllocationCreateInfo& createInfo,
12044  uint32_t memTypeIndex,
12045  VmaSuballocationType suballocType,
12046  VmaAllocation* pAllocation)
12047 {
12048  VMA_ASSERT(pAllocation != VMA_NULL);
12049  VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
12050 
12051  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12052 
12053  // If memory type is not HOST_VISIBLE, disable MAPPED.
12054  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12055  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12056  {
12057  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12058  }
12059 
12060  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12061  VMA_ASSERT(blockVector);
12062 
12063  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12064  bool preferDedicatedMemory =
12065  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12066  dedicatedAllocation ||
12067  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
12068  size > preferredBlockSize / 2;
12069 
12070  if(preferDedicatedMemory &&
12071  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12072  finalCreateInfo.pool == VK_NULL_HANDLE)
12073  {
12074  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
12075  }
12076 
12077  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12078  {
12079  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12080  {
12081  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12082  }
12083  else
12084  {
12085  return AllocateDedicatedMemory(
12086  size,
12087  suballocType,
12088  memTypeIndex,
12089  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12090  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12091  finalCreateInfo.pUserData,
12092  dedicatedBuffer,
12093  dedicatedImage,
12094  pAllocation);
12095  }
12096  }
12097  else
12098  {
12099  VkResult res = blockVector->Allocate(
12100  VK_NULL_HANDLE, // hCurrentPool
12101  m_CurrentFrameIndex.load(),
12102  size,
12103  alignment,
12104  finalCreateInfo,
12105  suballocType,
12106  pAllocation);
12107  if(res == VK_SUCCESS)
12108  {
12109  return res;
12110  }
12111 
12112  // Try dedicated memory.
12113  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12114  {
12115  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12116  }
12117  else
12118  {
12119  res = AllocateDedicatedMemory(
12120  size,
12121  suballocType,
12122  memTypeIndex,
12123  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12124  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12125  finalCreateInfo.pUserData,
12126  dedicatedBuffer,
12127  dedicatedImage,
12128  pAllocation);
12129  if(res == VK_SUCCESS)
12130  {
12131  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
12132  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12133  return VK_SUCCESS;
12134  }
12135  else
12136  {
12137  // Everything failed: Return error code.
12138  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12139  return res;
12140  }
12141  }
12142  }
12143 }
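// In short, the decision sequence above is:
//   1. Heuristics may force VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
//      (VMA_DEBUG_ALWAYS_DEDICATED_MEMORY, an explicit request, or
//      size > preferredBlockSize / 2).
//   2. With the bit set: allocate dedicated memory, unless
//      VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT forbids it.
//   3. Otherwise: try to suballocate from the block vector first, and fall
//      back to dedicated memory only if that fails and NEVER_ALLOCATE_BIT is
//      not set.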
12144 
12145 VkResult VmaAllocator_T::AllocateDedicatedMemory(
12146  VkDeviceSize size,
12147  VmaSuballocationType suballocType,
12148  uint32_t memTypeIndex,
12149  bool map,
12150  bool isUserDataString,
12151  void* pUserData,
12152  VkBuffer dedicatedBuffer,
12153  VkImage dedicatedImage,
12154  VmaAllocation* pAllocation)
12155 {
12156  VMA_ASSERT(pAllocation);
12157 
12158  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12159  allocInfo.memoryTypeIndex = memTypeIndex;
12160  allocInfo.allocationSize = size;
12161 
12162 #if VMA_DEDICATED_ALLOCATION
12163  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
12164  if(m_UseKhrDedicatedAllocation)
12165  {
12166  if(dedicatedBuffer != VK_NULL_HANDLE)
12167  {
12168  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
12169  dedicatedAllocInfo.buffer = dedicatedBuffer;
12170  allocInfo.pNext = &dedicatedAllocInfo;
12171  }
12172  else if(dedicatedImage != VK_NULL_HANDLE)
12173  {
12174  dedicatedAllocInfo.image = dedicatedImage;
12175  allocInfo.pNext = &dedicatedAllocInfo;
12176  }
12177  }
12178 #endif // #if VMA_DEDICATED_ALLOCATION
12179 
12180  // Allocate VkDeviceMemory.
12181  VkDeviceMemory hMemory = VK_NULL_HANDLE;
12182  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
12183  if(res < 0)
12184  {
12185  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12186  return res;
12187  }
12188 
12189  void* pMappedData = VMA_NULL;
12190  if(map)
12191  {
12192  res = (*m_VulkanFunctions.vkMapMemory)(
12193  m_hDevice,
12194  hMemory,
12195  0,
12196  VK_WHOLE_SIZE,
12197  0,
12198  &pMappedData);
12199  if(res < 0)
12200  {
12201  VMA_DEBUG_LOG(" vkMapMemory FAILED");
12202  FreeVulkanMemory(memTypeIndex, size, hMemory);
12203  return res;
12204  }
12205  }
12206 
12207  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
12208  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
12209  (*pAllocation)->SetUserData(this, pUserData);
12210  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12211  {
12212  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12213  }
12214 
12215  // Register it in m_pDedicatedAllocations.
12216  {
12217  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12218  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12219  VMA_ASSERT(pDedicatedAllocations);
12220  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
12221  }
12222 
12223  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
12224 
12225  return VK_SUCCESS;
12226 }
12227 
12228 void VmaAllocator_T::GetBufferMemoryRequirements(
12229  VkBuffer hBuffer,
12230  VkMemoryRequirements& memReq,
12231  bool& requiresDedicatedAllocation,
12232  bool& prefersDedicatedAllocation) const
12233 {
12234 #if VMA_DEDICATED_ALLOCATION
12235  if(m_UseKhrDedicatedAllocation)
12236  {
12237  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
12238  memReqInfo.buffer = hBuffer;
12239 
12240  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12241 
12242  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12243  memReq2.pNext = &memDedicatedReq;
12244 
12245  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12246 
12247  memReq = memReq2.memoryRequirements;
12248  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12249  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12250  }
12251  else
12252 #endif // #if VMA_DEDICATED_ALLOCATION
12253  {
12254  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
12255  requiresDedicatedAllocation = false;
12256  prefersDedicatedAllocation = false;
12257  }
12258 }
12259 
12260 void VmaAllocator_T::GetImageMemoryRequirements(
12261  VkImage hImage,
12262  VkMemoryRequirements& memReq,
12263  bool& requiresDedicatedAllocation,
12264  bool& prefersDedicatedAllocation) const
12265 {
12266 #if VMA_DEDICATED_ALLOCATION
12267  if(m_UseKhrDedicatedAllocation)
12268  {
12269  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
12270  memReqInfo.image = hImage;
12271 
12272  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12273 
12274  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12275  memReq2.pNext = &memDedicatedReq;
12276 
12277  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12278 
12279  memReq = memReq2.memoryRequirements;
12280  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12281  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12282  }
12283  else
12284 #endif // #if VMA_DEDICATED_ALLOCATION
12285  {
12286  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
12287  requiresDedicatedAllocation = false;
12288  prefersDedicatedAllocation = false;
12289  }
12290 }
12291 
12292 VkResult VmaAllocator_T::AllocateMemory(
12293  const VkMemoryRequirements& vkMemReq,
12294  bool requiresDedicatedAllocation,
12295  bool prefersDedicatedAllocation,
12296  VkBuffer dedicatedBuffer,
12297  VkImage dedicatedImage,
12298  const VmaAllocationCreateInfo& createInfo,
12299  VmaSuballocationType suballocType,
12300  VmaAllocation* pAllocation)
12301 {
12302  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12303 
12304  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12305  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12306  {
12307  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12308  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12309  }
12310  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12311  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
12312  {
12313  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12314  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12315  }
12316  if(requiresDedicatedAllocation)
12317  {
12318  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12319  {
12320  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12321  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12322  }
12323  if(createInfo.pool != VK_NULL_HANDLE)
12324  {
12325  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12326  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12327  }
12328  }
12329  if((createInfo.pool != VK_NULL_HANDLE) &&
12330  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12331  {
12332  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12333  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12334  }
12335 
12336  if(createInfo.pool != VK_NULL_HANDLE)
12337  {
12338  const VkDeviceSize alignmentForPool = VMA_MAX(
12339  vkMemReq.alignment,
12340  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12341  return createInfo.pool->m_BlockVector.Allocate(
12342  createInfo.pool,
12343  m_CurrentFrameIndex.load(),
12344  vkMemReq.size,
12345  alignmentForPool,
12346  createInfo,
12347  suballocType,
12348  pAllocation);
12349  }
12350  else
12351  {
12352  // Bit mask of Vulkan memory types acceptable for this allocation.
12353  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12354  uint32_t memTypeIndex = UINT32_MAX;
12355  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12356  if(res == VK_SUCCESS)
12357  {
12358  VkDeviceSize alignmentForMemType = VMA_MAX(
12359  vkMemReq.alignment,
12360  GetMemoryTypeMinAlignment(memTypeIndex));
12361 
12362  res = AllocateMemoryOfType(
12363  vkMemReq.size,
12364  alignmentForMemType,
12365  requiresDedicatedAllocation || prefersDedicatedAllocation,
12366  dedicatedBuffer,
12367  dedicatedImage,
12368  createInfo,
12369  memTypeIndex,
12370  suballocType,
12371  pAllocation);
12372  // Succeeded on first try.
12373  if(res == VK_SUCCESS)
12374  {
12375  return res;
12376  }
12377  // Allocation from this memory type failed. Try other compatible memory types.
12378  else
12379  {
12380  for(;;)
12381  {
12382  // Remove old memTypeIndex from list of possibilities.
12383  memoryTypeBits &= ~(1u << memTypeIndex);
12384  // Find alternative memTypeIndex.
12385  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12386  if(res == VK_SUCCESS)
12387  {
12388  alignmentForMemType = VMA_MAX(
12389  vkMemReq.alignment,
12390  GetMemoryTypeMinAlignment(memTypeIndex));
12391 
12392  res = AllocateMemoryOfType(
12393  vkMemReq.size,
12394  alignmentForMemType,
12395  requiresDedicatedAllocation || prefersDedicatedAllocation,
12396  dedicatedBuffer,
12397  dedicatedImage,
12398  createInfo,
12399  memTypeIndex,
12400  suballocType,
12401  pAllocation);
12402  // Allocation from this alternative memory type succeeded.
12403  if(res == VK_SUCCESS)
12404  {
12405  return res;
12406  }
12407  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12408  }
12409  // No other matching memory type index could be found.
12410  else
12411  {
12412  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12413  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12414  }
12415  }
12416  }
12417  }
12418  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12419  else
12420  return res;
12421  }
12422 }
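// Illustrative example of the retry loop above (hypothetical values): if
// vkMemReq.memoryTypeBits == 0b1101 and vmaFindMemoryTypeIndex() first
// returns memTypeIndex == 0 but the allocation fails, the mask becomes
// 0b1101 & ~(1u << 0) == 0b1100, and memory types 2 and 3 are tried in turn
// before the function gives up with VK_ERROR_OUT_OF_DEVICE_MEMORY.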
12423 
12424 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
12425 {
12426  VMA_ASSERT(allocation);
12427 
12428  if(TouchAllocation(allocation))
12429  {
12430  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12431  {
12432  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12433  }
12434 
12435  switch(allocation->GetType())
12436  {
12437  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12438  {
12439  VmaBlockVector* pBlockVector = VMA_NULL;
12440  VmaPool hPool = allocation->GetPool();
12441  if(hPool != VK_NULL_HANDLE)
12442  {
12443  pBlockVector = &hPool->m_BlockVector;
12444  }
12445  else
12446  {
12447  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12448  pBlockVector = m_pBlockVectors[memTypeIndex];
12449  }
12450  pBlockVector->Free(allocation);
12451  }
12452  break;
12453  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12454  FreeDedicatedMemory(allocation);
12455  break;
12456  default:
12457  VMA_ASSERT(0);
12458  }
12459  }
12460 
12461  allocation->SetUserData(this, VMA_NULL);
12462  vma_delete(this, allocation);
12463 }
12464 
12465 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12466 {
12467  // Initialize.
12468  InitStatInfo(pStats->total);
12469  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12470  InitStatInfo(pStats->memoryType[i]);
12471  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12472  InitStatInfo(pStats->memoryHeap[i]);
12473 
12474  // Process default pools.
12475  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12476  {
12477  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12478  VMA_ASSERT(pBlockVector);
12479  pBlockVector->AddStats(pStats);
12480  }
12481 
12482  // Process custom pools.
12483  {
12484  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12485  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12486  {
12487  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12488  }
12489  }
12490 
12491  // Process dedicated allocations.
12492  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12493  {
12494  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12495  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12496  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12497  VMA_ASSERT(pDedicatedAllocVector);
12498  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12499  {
12500  VmaStatInfo allocationStatInfo;
12501  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12502  VmaAddStatInfo(pStats->total, allocationStatInfo);
12503  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12504  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12505  }
12506  }
12507 
12508  // Postprocess.
12509  VmaPostprocessCalcStatInfo(pStats->total);
12510  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12511  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12512  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12513  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12514 }
12515 
12516 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12517 
12518 VkResult VmaAllocator_T::Defragment(
12519  VmaAllocation* pAllocations,
12520  size_t allocationCount,
12521  VkBool32* pAllocationsChanged,
12522  const VmaDefragmentationInfo* pDefragmentationInfo,
12523  VmaDefragmentationStats* pDefragmentationStats)
12524 {
12525  if(pAllocationsChanged != VMA_NULL)
12526  {
12527  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
12528  }
12529  if(pDefragmentationStats != VMA_NULL)
12530  {
12531  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12532  }
12533 
12534  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12535 
12536  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12537 
12538  const size_t poolCount = m_Pools.size();
12539 
12540  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12541  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12542  {
12543  VmaAllocation hAlloc = pAllocations[allocIndex];
12544  VMA_ASSERT(hAlloc);
12545  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12546  // DedicatedAlloc cannot be defragmented.
12547  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12548  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12549  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12550  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12551  // Lost allocation cannot be defragmented.
12552  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12553  {
12554  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12555 
12556  const VmaPool hAllocPool = hAlloc->GetPool();
12557  // This allocation belongs to a custom pool.
12558  if(hAllocPool != VK_NULL_HANDLE)
12559  {
12560  // Pools with linear or buddy algorithm are not defragmented.
12561  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12562  {
12563  pAllocBlockVector = &hAllocPool->m_BlockVector;
12564  }
12565  }
12566  // This allocation belongs to the general pool.
12567  else
12568  {
12569  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12570  }
12571 
12572  if(pAllocBlockVector != VMA_NULL)
12573  {
12574  VmaDefragmentator* const pDefragmentator =
12575  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12576  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12577  &pAllocationsChanged[allocIndex] : VMA_NULL;
12578  pDefragmentator->AddAllocation(hAlloc, pChanged);
12579  }
12580  }
12581  }
12582 
12583  VkResult result = VK_SUCCESS;
12584 
12585  // ======== Main processing.
12586 
12587  VkDeviceSize maxBytesToMove = SIZE_MAX;
12588  uint32_t maxAllocationsToMove = UINT32_MAX;
12589  if(pDefragmentationInfo != VMA_NULL)
12590  {
12591  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12592  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12593  }
12594 
12595  // Process standard memory.
12596  for(uint32_t memTypeIndex = 0;
12597  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12598  ++memTypeIndex)
12599  {
12600  // Only HOST_VISIBLE memory types can be defragmented.
12601  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12602  {
12603  result = m_pBlockVectors[memTypeIndex]->Defragment(
12604  pDefragmentationStats,
12605  maxBytesToMove,
12606  maxAllocationsToMove);
12607  }
12608  }
12609 
12610  // Process custom pools.
12611  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12612  {
12613  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12614  pDefragmentationStats,
12615  maxBytesToMove,
12616  maxAllocationsToMove);
12617  }
12618 
12619  // ======== Destroy defragmentators.
12620 
12621  // Process custom pools.
12622  for(size_t poolIndex = poolCount; poolIndex--; )
12623  {
12624  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12625  }
12626 
12627  // Process standard memory.
12628  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12629  {
12630  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12631  {
12632  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12633  }
12634  }
12635 
12636  return result;
12637 }
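// Usage sketch of the public wrapper vmaDefragment() (a minimal example;
// 'allocator' and 'allocations' are assumed to exist, the latter holding
// HOST_VISIBLE block allocations gathered by the caller):
//
//   std::vector<VkBool32> changed(allocations.size());
//   VmaDefragmentationInfo defragInfo = {};
//   defragInfo.maxBytesToMove = VK_WHOLE_SIZE;     // no byte limit
//   defragInfo.maxAllocationsToMove = UINT32_MAX;  // no move limit
//   VmaDefragmentationStats stats = {};
//   VkResult res = vmaDefragment(allocator, allocations.data(),
//       allocations.size(), changed.data(), &defragInfo, &stats);
//   // Wherever changed[i] == VK_TRUE, the allocation was moved: destroy and
//   // recreate the buffer/image bound to it and re-query vmaGetAllocationInfo().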
12638 
12639 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
12640 {
12641  if(hAllocation->CanBecomeLost())
12642  {
12643  /*
12644  Warning: This is a carefully designed algorithm.
12645  Do not modify unless you really know what you're doing :)
12646  */
12647  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12648  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12649  for(;;)
12650  {
12651  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12652  {
12653  pAllocationInfo->memoryType = UINT32_MAX;
12654  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
12655  pAllocationInfo->offset = 0;
12656  pAllocationInfo->size = hAllocation->GetSize();
12657  pAllocationInfo->pMappedData = VMA_NULL;
12658  pAllocationInfo->pUserData = hAllocation->GetUserData();
12659  return;
12660  }
12661  else if(localLastUseFrameIndex == localCurrFrameIndex)
12662  {
12663  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12664  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12665  pAllocationInfo->offset = hAllocation->GetOffset();
12666  pAllocationInfo->size = hAllocation->GetSize();
12667  pAllocationInfo->pMappedData = VMA_NULL;
12668  pAllocationInfo->pUserData = hAllocation->GetUserData();
12669  return;
12670  }
12671  else // Last use time earlier than current time.
12672  {
12673  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12674  {
12675  localLastUseFrameIndex = localCurrFrameIndex;
12676  }
12677  }
12678  }
12679  }
12680  else
12681  {
12682 #if VMA_STATS_STRING_ENABLED
12683  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12684  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12685  for(;;)
12686  {
12687  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12688  if(localLastUseFrameIndex == localCurrFrameIndex)
12689  {
12690  break;
12691  }
12692  else // Last use time earlier than current time.
12693  {
12694  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12695  {
12696  localLastUseFrameIndex = localCurrFrameIndex;
12697  }
12698  }
12699  }
12700 #endif
12701 
12702  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12703  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12704  pAllocationInfo->offset = hAllocation->GetOffset();
12705  pAllocationInfo->size = hAllocation->GetSize();
12706  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
12707  pAllocationInfo->pUserData = hAllocation->GetUserData();
12708  }
12709 }
12710 
12711 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
12712 {
12713  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
12714  if(hAllocation->CanBecomeLost())
12715  {
12716  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12717  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12718  for(;;)
12719  {
12720  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12721  {
12722  return false;
12723  }
12724  else if(localLastUseFrameIndex == localCurrFrameIndex)
12725  {
12726  return true;
12727  }
12728  else // Last use time earlier than current time.
12729  {
12730  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12731  {
12732  localLastUseFrameIndex = localCurrFrameIndex;
12733  }
12734  }
12735  }
12736  }
12737  else
12738  {
12739 #if VMA_STATS_STRING_ENABLED
12740  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12741  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12742  for(;;)
12743  {
12744  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12745  if(localLastUseFrameIndex == localCurrFrameIndex)
12746  {
12747  break;
12748  }
12749  else // Last use time earlier than current time.
12750  {
12751  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12752  {
12753  localLastUseFrameIndex = localCurrFrameIndex;
12754  }
12755  }
12756  }
12757 #endif
12758 
12759  return true;
12760  }
12761 }
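// Note on the compare-exchange loops in GetAllocationInfo() and
// TouchAllocation() above: each iteration attempts to advance the
// allocation's last-use frame index to the current frame. If another thread
// wins the race, the locally cached value is reloaded and the loop retries;
// it terminates as soon as the stored index equals the current frame, or
// (for allocations that can become lost) when the index is observed as
// VMA_FRAME_INDEX_LOST.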
12762 
12763 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
12764 {
12765  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
12766 
12767  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
12768 
12769  if(newCreateInfo.maxBlockCount == 0)
12770  {
12771  newCreateInfo.maxBlockCount = SIZE_MAX;
12772  }
12773  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
12774  {
12775  return VK_ERROR_INITIALIZATION_FAILED;
12776  }
12777 
12778  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
12779 
12780  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
12781 
12782  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
12783  if(res != VK_SUCCESS)
12784  {
12785  vma_delete(this, *pPool);
12786  *pPool = VMA_NULL;
12787  return res;
12788  }
12789 
12790  // Add to m_Pools.
12791  {
12792  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12793  (*pPool)->SetId(m_NextPoolId++);
12794  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
12795  }
12796 
12797  return VK_SUCCESS;
12798 }
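// Usage sketch of the public wrapper vmaCreatePool() (a minimal example;
// 'allocator' and 'memTypeIndex' are assumed to come from the caller, e.g.
// from vmaFindMemoryTypeIndex()):
//
//   VmaPoolCreateInfo poolInfo = {};
//   poolInfo.memoryTypeIndex = memTypeIndex;
//   poolInfo.blockSize = 0;     // 0 = use CalcPreferredBlockSize() above
//   poolInfo.maxBlockCount = 0; // 0 = no limit (becomes SIZE_MAX above)
//   VmaPool pool = VK_NULL_HANDLE;
//   VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
//   // ... use the pool via VmaAllocationCreateInfo::pool ...
//   vmaDestroyPool(allocator, pool);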
12799 
12800 void VmaAllocator_T::DestroyPool(VmaPool pool)
12801 {
12802  // Remove from m_Pools.
12803  {
12804  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12805  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
12806  VMA_ASSERT(success && "Pool not found in Allocator.");
12807  }
12808 
12809  vma_delete(this, pool);
12810 }
12811 
12812 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
12813 {
12814  pool->m_BlockVector.GetPoolStats(pPoolStats);
12815 }
12816 
12817 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
12818 {
12819  m_CurrentFrameIndex.store(frameIndex);
12820 }
12821 
12822 void VmaAllocator_T::MakePoolAllocationsLost(
12823  VmaPool hPool,
12824  size_t* pLostAllocationCount)
12825 {
12826  hPool->m_BlockVector.MakePoolAllocationsLost(
12827  m_CurrentFrameIndex.load(),
12828  pLostAllocationCount);
12829 }
12830 
12831 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
12832 {
12833  return hPool->m_BlockVector.CheckCorruption();
12834 }
12835 
12836 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
12837 {
12838  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
12839 
12840  // Process default pools.
12841  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12842  {
12843  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
12844  {
12845  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12846  VMA_ASSERT(pBlockVector);
12847  VkResult localRes = pBlockVector->CheckCorruption();
12848  switch(localRes)
12849  {
12850  case VK_ERROR_FEATURE_NOT_PRESENT:
12851  break;
12852  case VK_SUCCESS:
12853  finalRes = VK_SUCCESS;
12854  break;
12855  default:
12856  return localRes;
12857  }
12858  }
12859  }
12860 
12861  // Process custom pools.
12862  {
12863  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12864  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12865  {
12866  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
12867  {
12868  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
12869  switch(localRes)
12870  {
12871  case VK_ERROR_FEATURE_NOT_PRESENT:
12872  break;
12873  case VK_SUCCESS:
12874  finalRes = VK_SUCCESS;
12875  break;
12876  default:
12877  return localRes;
12878  }
12879  }
12880  }
12881  }
12882 
12883  return finalRes;
12884 }
12885 
12886 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
12887 {
12888  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
12889  (*pAllocation)->InitLost();
12890 }
12891 
12892 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
12893 {
12894  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
12895 
12896  VkResult res;
12897  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
12898  {
12899  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
12900  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
12901  {
12902  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
12903  if(res == VK_SUCCESS)
12904  {
12905  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
12906  }
12907  }
12908  else
12909  {
12910  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
12911  }
12912  }
12913  else
12914  {
12915  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
12916  }
12917 
12918  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
12919  {
12920  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
12921  }
12922 
12923  return res;
12924 }
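// A minimal usage sketch (illustrative, not part of the library): the per-heap
// budget enforced above comes from VmaAllocatorCreateInfo::pHeapSizeLimit,
// where VK_WHOLE_SIZE means "no limit on this heap". The helper name, the heap
// index 0, and the 256 MiB cap are hypothetical; the handles are assumed valid.
static VkResult ExampleCreateAllocatorWithHeapLimit(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapSizeLimit[i] = VK_WHOLE_SIZE; // No limit.
    }
    heapSizeLimit[0] = 256ull * 1024 * 1024; // Cap heap 0 at 256 MiB.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapSizeLimit; // Read during creation.
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}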
12925 
12926 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
12927 {
12928  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
12929  {
12930  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
12931  }
12932 
12933  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
12934 
12935  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
12936  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
12937  {
12938  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
12939  m_HeapSizeLimit[heapIndex] += size;
12940  }
12941 }
12942 
12943 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
12944 {
12945  if(hAllocation->CanBecomeLost())
12946  {
12947  return VK_ERROR_MEMORY_MAP_FAILED;
12948  }
12949 
12950  switch(hAllocation->GetType())
12951  {
12952  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12953  {
12954  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12955  char *pBytes = VMA_NULL;
12956  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
12957  if(res == VK_SUCCESS)
12958  {
12959  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
12960  hAllocation->BlockAllocMap();
12961  }
12962  return res;
12963  }
12964  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12965  return hAllocation->DedicatedAllocMap(this, ppData);
12966  default:
12967  VMA_ASSERT(0);
12968  return VK_ERROR_MEMORY_MAP_FAILED;
12969  }
12970 }
12971 
12972 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
12973 {
12974  switch(hAllocation->GetType())
12975  {
12976  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12977  {
12978  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12979  hAllocation->BlockAllocUnmap();
12980  pBlock->Unmap(this, 1);
12981  }
12982  break;
12983  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12984  hAllocation->DedicatedAllocUnmap(this);
12985  break;
12986  default:
12987  VMA_ASSERT(0);
12988  }
12989 }
12990 
12991 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
12992 {
12993  VkResult res = VK_SUCCESS;
12994  switch(hAllocation->GetType())
12995  {
12996  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12997  res = GetVulkanFunctions().vkBindBufferMemory(
12998  m_hDevice,
12999  hBuffer,
13000  hAllocation->GetMemory(),
13001  0); //memoryOffset
13002  break;
13003  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13004  {
13005  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13006  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
13007  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
13008  break;
13009  }
13010  default:
13011  VMA_ASSERT(0);
13012  }
13013  return res;
13014 }
13015 
13016 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
13017 {
13018  VkResult res = VK_SUCCESS;
13019  switch(hAllocation->GetType())
13020  {
13021  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13022  res = GetVulkanFunctions().vkBindImageMemory(
13023  m_hDevice,
13024  hImage,
13025  hAllocation->GetMemory(),
13026  0); //memoryOffset
13027  break;
13028  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13029  {
13030  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13031  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13032  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13033  break;
13034  }
13035  default:
13036  VMA_ASSERT(0);
13037  }
13038  return res;
13039 }
13040 
13041 void VmaAllocator_T::FlushOrInvalidateAllocation(
13042  VmaAllocation hAllocation,
13043  VkDeviceSize offset, VkDeviceSize size,
13044  VMA_CACHE_OPERATION op)
13045 {
13046  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
13047  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
13048  {
13049  const VkDeviceSize allocationSize = hAllocation->GetSize();
13050  VMA_ASSERT(offset <= allocationSize);
13051 
13052  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13053 
13054  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13055  memRange.memory = hAllocation->GetMemory();
13056 
13057  switch(hAllocation->GetType())
13058  {
13059  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13060  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13061  if(size == VK_WHOLE_SIZE)
13062  {
13063  memRange.size = allocationSize - memRange.offset;
13064  }
13065  else
13066  {
13067  VMA_ASSERT(offset + size <= allocationSize);
13068  memRange.size = VMA_MIN(
13069  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
13070  allocationSize - memRange.offset);
13071  }
13072  break;
13073 
13074  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13075  {
13076  // 1. Still within this allocation.
13077  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13078  if(size == VK_WHOLE_SIZE)
13079  {
13080  size = allocationSize - offset;
13081  }
13082  else
13083  {
13084  VMA_ASSERT(offset + size <= allocationSize);
13085  }
13086  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
13087 
13088  // 2. Adjust to whole block.
13089  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
13090  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
13091  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
13092  memRange.offset += allocationOffset;
13093  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
13094 
13095  break;
13096  }
13097 
13098  default:
13099  VMA_ASSERT(0);
13100  }
13101 
13102  switch(op)
13103  {
13104  case VMA_CACHE_FLUSH:
13105  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
13106  break;
13107  case VMA_CACHE_INVALIDATE:
13108  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
13109  break;
13110  default:
13111  VMA_ASSERT(0);
13112  }
13113  }
13114  // else: Just ignore this call.
13115 }
13116 
13117 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13118 {
13119  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13120 
13121  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13122  {
13123  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13124  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13125  VMA_ASSERT(pDedicatedAllocations);
13126  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13127  VMA_ASSERT(success);
13128  }
13129 
13130  VkDeviceMemory hMemory = allocation->GetMemory();
13131 
13132  /*
13133  There is no need to call this, because the Vulkan spec allows skipping
13134  vkUnmapMemory before vkFreeMemory.
13135 
13136  if(allocation->GetMappedData() != VMA_NULL)
13137  {
13138  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13139  }
13140  */
13141 
13142  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13143 
13144  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13145 }
13146 
13147 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13148 {
13149  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13150  !hAllocation->CanBecomeLost() &&
13151  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13152  {
13153  void* pData = VMA_NULL;
13154  VkResult res = Map(hAllocation, &pData);
13155  if(res == VK_SUCCESS)
13156  {
13157  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13158  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13159  Unmap(hAllocation);
13160  }
13161  else
13162  {
13163  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13164  }
13165  }
13166 }
13167 
13168 #if VMA_STATS_STRING_ENABLED
13169 
13170 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
13171 {
13172  bool dedicatedAllocationsStarted = false;
13173  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13174  {
13175  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13176  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
13177  VMA_ASSERT(pDedicatedAllocVector);
13178  if(pDedicatedAllocVector->empty() == false)
13179  {
13180  if(dedicatedAllocationsStarted == false)
13181  {
13182  dedicatedAllocationsStarted = true;
13183  json.WriteString("DedicatedAllocations");
13184  json.BeginObject();
13185  }
13186 
13187  json.BeginString("Type ");
13188  json.ContinueString(memTypeIndex);
13189  json.EndString();
13190 
13191  json.BeginArray();
13192 
13193  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
13194  {
13195  json.BeginObject(true);
13196  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
13197  hAlloc->PrintParameters(json);
13198  json.EndObject();
13199  }
13200 
13201  json.EndArray();
13202  }
13203  }
13204  if(dedicatedAllocationsStarted)
13205  {
13206  json.EndObject();
13207  }
13208 
13209  {
13210  bool allocationsStarted = false;
13211  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13212  {
13213  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
13214  {
13215  if(allocationsStarted == false)
13216  {
13217  allocationsStarted = true;
13218  json.WriteString("DefaultPools");
13219  json.BeginObject();
13220  }
13221 
13222  json.BeginString("Type ");
13223  json.ContinueString(memTypeIndex);
13224  json.EndString();
13225 
13226  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
13227  }
13228  }
13229  if(allocationsStarted)
13230  {
13231  json.EndObject();
13232  }
13233  }
13234 
13235  // Custom pools
13236  {
13237  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13238  const size_t poolCount = m_Pools.size();
13239  if(poolCount > 0)
13240  {
13241  json.WriteString("Pools");
13242  json.BeginObject();
13243  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13244  {
13245  json.BeginString();
13246  json.ContinueString(m_Pools[poolIndex]->GetId());
13247  json.EndString();
13248 
13249  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
13250  }
13251  json.EndObject();
13252  }
13253  }
13254 }
13255 
13256 #endif // #if VMA_STATS_STRING_ENABLED
13257 
13258 ////////////////////////////////////////////////////////////////////////////////
13259 // Public interface
13260 
13261 VkResult vmaCreateAllocator(
13262  const VmaAllocatorCreateInfo* pCreateInfo,
13263  VmaAllocator* pAllocator)
13264 {
13265  VMA_ASSERT(pCreateInfo && pAllocator);
13266  VMA_DEBUG_LOG("vmaCreateAllocator");
13267  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13268  return (*pAllocator)->Init(pCreateInfo);
13269 }
13270 
13271 void vmaDestroyAllocator(
13272  VmaAllocator allocator)
13273 {
13274  if(allocator != VK_NULL_HANDLE)
13275  {
13276  VMA_DEBUG_LOG("vmaDestroyAllocator");
13277  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13278  vma_delete(&allocationCallbacks, allocator);
13279  }
13280 }
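// A minimal usage sketch (illustrative): the smallest possible allocator setup
// and teardown. `physicalDevice` and `device` are assumed to be valid handles
// from normal Vulkan initialization; the helper name is hypothetical.
static void ExampleCreateDestroyAllocator(VkPhysicalDevice physicalDevice, VkDevice device)
{
    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator = VK_NULL_HANDLE;
    if(vmaCreateAllocator(&allocatorInfo, &allocator) == VK_SUCCESS)
    {
        // ... create buffers, images, allocations ...
        vmaDestroyAllocator(allocator);
    }
}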
13281 
13282 void vmaGetPhysicalDeviceProperties(
13283  VmaAllocator allocator,
13284  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13285 {
13286  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13287  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13288 }
13289 
13290 void vmaGetMemoryProperties(
13291  VmaAllocator allocator,
13292  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13293 {
13294  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13295  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13296 }
13297 
13298 void vmaGetMemoryTypeProperties(
13299  VmaAllocator allocator,
13300  uint32_t memoryTypeIndex,
13301  VkMemoryPropertyFlags* pFlags)
13302 {
13303  VMA_ASSERT(allocator && pFlags);
13304  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13305  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13306 }
13307 
13308 void vmaSetCurrentFrameIndex(
13309  VmaAllocator allocator,
13310  uint32_t frameIndex)
13311 {
13312  VMA_ASSERT(allocator);
13313  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13314 
13315  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13316 
13317  allocator->SetCurrentFrameIndex(frameIndex);
13318 }
13319 
13320 void vmaCalculateStats(
13321  VmaAllocator allocator,
13322  VmaStats* pStats)
13323 {
13324  VMA_ASSERT(allocator && pStats);
13325  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13326  allocator->CalculateStats(pStats);
13327 }
13328 
13329 #if VMA_STATS_STRING_ENABLED
13330 
13331 void vmaBuildStatsString(
13332  VmaAllocator allocator,
13333  char** ppStatsString,
13334  VkBool32 detailedMap)
13335 {
13336  VMA_ASSERT(allocator && ppStatsString);
13337  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13338 
13339  VmaStringBuilder sb(allocator);
13340  {
13341  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13342  json.BeginObject();
13343 
13344  VmaStats stats;
13345  allocator->CalculateStats(&stats);
13346 
13347  json.WriteString("Total");
13348  VmaPrintStatInfo(json, stats.total);
13349 
13350  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13351  {
13352  json.BeginString("Heap ");
13353  json.ContinueString(heapIndex);
13354  json.EndString();
13355  json.BeginObject();
13356 
13357  json.WriteString("Size");
13358  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13359 
13360  json.WriteString("Flags");
13361  json.BeginArray(true);
13362  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13363  {
13364  json.WriteString("DEVICE_LOCAL");
13365  }
13366  json.EndArray();
13367 
13368  if(stats.memoryHeap[heapIndex].blockCount > 0)
13369  {
13370  json.WriteString("Stats");
13371  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13372  }
13373 
13374  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13375  {
13376  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13377  {
13378  json.BeginString("Type ");
13379  json.ContinueString(typeIndex);
13380  json.EndString();
13381 
13382  json.BeginObject();
13383 
13384  json.WriteString("Flags");
13385  json.BeginArray(true);
13386  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13387  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13388  {
13389  json.WriteString("DEVICE_LOCAL");
13390  }
13391  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13392  {
13393  json.WriteString("HOST_VISIBLE");
13394  }
13395  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13396  {
13397  json.WriteString("HOST_COHERENT");
13398  }
13399  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13400  {
13401  json.WriteString("HOST_CACHED");
13402  }
13403  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13404  {
13405  json.WriteString("LAZILY_ALLOCATED");
13406  }
13407  json.EndArray();
13408 
13409  if(stats.memoryType[typeIndex].blockCount > 0)
13410  {
13411  json.WriteString("Stats");
13412  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13413  }
13414 
13415  json.EndObject();
13416  }
13417  }
13418 
13419  json.EndObject();
13420  }
13421  if(detailedMap == VK_TRUE)
13422  {
13423  allocator->PrintDetailedMap(json);
13424  }
13425 
13426  json.EndObject();
13427  }
13428 
13429  const size_t len = sb.GetLength();
13430  char* const pChars = vma_new_array(allocator, char, len + 1);
13431  if(len > 0)
13432  {
13433  memcpy(pChars, sb.GetData(), len);
13434  }
13435  pChars[len] = '\0';
13436  *ppStatsString = pChars;
13437 }
13438 
13439 void vmaFreeStatsString(
13440  VmaAllocator allocator,
13441  char* pStatsString)
13442 {
13443  if(pStatsString != VMA_NULL)
13444  {
13445  VMA_ASSERT(allocator);
13446  size_t len = strlen(pStatsString);
13447  vma_delete_array(allocator, pStatsString, len + 1);
13448  }
13449 }
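// A minimal usage sketch (illustrative): every string returned by
// vmaBuildStatsString() must be released with vmaFreeStatsString() on the same
// allocator. The helper name is hypothetical.
static void ExampleDumpStatsJson(VmaAllocator allocator)
{
    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: include detailed map.
    // ... write the JSON in statsString to a file or log here ...
    vmaFreeStatsString(allocator, statsString);
}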
13450 
13451 #endif // #if VMA_STATS_STRING_ENABLED
13452 
13453 /*
13454 This function is not protected by any mutex because it just reads immutable data.
13455 */
13456 VkResult vmaFindMemoryTypeIndex(
13457  VmaAllocator allocator,
13458  uint32_t memoryTypeBits,
13459  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13460  uint32_t* pMemoryTypeIndex)
13461 {
13462  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13463  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13464  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13465 
13466  if(pAllocationCreateInfo->memoryTypeBits != 0)
13467  {
13468  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13469  }
13470 
13471  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13472  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13473 
13474  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13475  if(mapped)
13476  {
13477  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13478  }
13479 
13480  // Convert usage to requiredFlags and preferredFlags.
13481  switch(pAllocationCreateInfo->usage)
13482  {
13483  case VMA_MEMORY_USAGE_UNKNOWN:
13484  break;
13485  case VMA_MEMORY_USAGE_GPU_ONLY:
13486  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13487  {
13488  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13489  }
13490  break;
13491  case VMA_MEMORY_USAGE_CPU_ONLY:
13492  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13493  break;
13494  case VMA_MEMORY_USAGE_CPU_TO_GPU:
13495  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13496  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13497  {
13498  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13499  }
13500  break;
13501  case VMA_MEMORY_USAGE_GPU_TO_CPU:
13502  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13503  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13504  break;
13505  default:
13506  break;
13507  }
13508 
13509  *pMemoryTypeIndex = UINT32_MAX;
13510  uint32_t minCost = UINT32_MAX;
13511  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13512  memTypeIndex < allocator->GetMemoryTypeCount();
13513  ++memTypeIndex, memTypeBit <<= 1)
13514  {
13515  // This memory type is acceptable according to the memoryTypeBits bitmask.
13516  if((memTypeBit & memoryTypeBits) != 0)
13517  {
13518  const VkMemoryPropertyFlags currFlags =
13519  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13520  // This memory type contains requiredFlags.
13521  if((requiredFlags & ~currFlags) == 0)
13522  {
13523  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13524  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13525  // Remember memory type with lowest cost.
13526  if(currCost < minCost)
13527  {
13528  *pMemoryTypeIndex = memTypeIndex;
13529  if(currCost == 0)
13530  {
13531  return VK_SUCCESS;
13532  }
13533  minCost = currCost;
13534  }
13535  }
13536  }
13537  }
13538  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13539 }
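// A minimal usage sketch (illustrative): querying a memory type up front,
// e.g. to fill VmaPoolCreateInfo::memoryTypeIndex. Passing UINT32_MAX as
// memoryTypeBits accepts every type; normally the mask comes from
// VkMemoryRequirements. The helper name is hypothetical.
static VkResult ExampleFindGpuOnlyMemoryType(VmaAllocator allocator, uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    return vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, pMemTypeIndex);
}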
13540 
13541 VkResult vmaFindMemoryTypeIndexForBufferInfo(
13542  VmaAllocator allocator,
13543  const VkBufferCreateInfo* pBufferCreateInfo,
13544  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13545  uint32_t* pMemoryTypeIndex)
13546 {
13547  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13548  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13549  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13550  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13551 
13552  const VkDevice hDev = allocator->m_hDevice;
13553  VkBuffer hBuffer = VK_NULL_HANDLE;
13554  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13555  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13556  if(res == VK_SUCCESS)
13557  {
13558  VkMemoryRequirements memReq = {};
13559  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13560  hDev, hBuffer, &memReq);
13561 
13562  res = vmaFindMemoryTypeIndex(
13563  allocator,
13564  memReq.memoryTypeBits,
13565  pAllocationCreateInfo,
13566  pMemoryTypeIndex);
13567 
13568  allocator->GetVulkanFunctions().vkDestroyBuffer(
13569  hDev, hBuffer, allocator->GetAllocationCallbacks());
13570  }
13571  return res;
13572 }
13573 
13574 VkResult vmaFindMemoryTypeIndexForImageInfo(
13575  VmaAllocator allocator,
13576  const VkImageCreateInfo* pImageCreateInfo,
13577  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13578  uint32_t* pMemoryTypeIndex)
13579 {
13580  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13581  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13582  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13583  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13584 
13585  const VkDevice hDev = allocator->m_hDevice;
13586  VkImage hImage = VK_NULL_HANDLE;
13587  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13588  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13589  if(res == VK_SUCCESS)
13590  {
13591  VkMemoryRequirements memReq = {};
13592  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13593  hDev, hImage, &memReq);
13594 
13595  res = vmaFindMemoryTypeIndex(
13596  allocator,
13597  memReq.memoryTypeBits,
13598  pAllocationCreateInfo,
13599  pMemoryTypeIndex);
13600 
13601  allocator->GetVulkanFunctions().vkDestroyImage(
13602  hDev, hImage, allocator->GetAllocationCallbacks());
13603  }
13604  return res;
13605 }
13606 
13607 VkResult vmaCreatePool(
13608  VmaAllocator allocator,
13609  const VmaPoolCreateInfo* pCreateInfo,
13610  VmaPool* pPool)
13611 {
13612  VMA_ASSERT(allocator && pCreateInfo && pPool);
13613 
13614  VMA_DEBUG_LOG("vmaCreatePool");
13615 
13616  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13617 
13618  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13619 
13620 #if VMA_RECORDING_ENABLED
13621  if(allocator->GetRecorder() != VMA_NULL)
13622  {
13623  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13624  }
13625 #endif
13626 
13627  return res;
13628 }
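// A minimal usage sketch (illustrative): a custom pool with a fixed block size
// and block count. The memory type index is assumed to come from
// vmaFindMemoryTypeIndex() or one of its ForBufferInfo/ForImageInfo variants;
// the helper name and sizes are hypothetical.
static VkResult ExampleCreateFixedPool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per block.
    poolCreateInfo.maxBlockCount = 2;               // At most 32 MiB total.
    // Destroy later with vmaDestroyPool(allocator, *pPool).
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}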
13629 
13630 void vmaDestroyPool(
13631  VmaAllocator allocator,
13632  VmaPool pool)
13633 {
13634  VMA_ASSERT(allocator);
13635 
13636  if(pool == VK_NULL_HANDLE)
13637  {
13638  return;
13639  }
13640 
13641  VMA_DEBUG_LOG("vmaDestroyPool");
13642 
13643  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13644 
13645 #if VMA_RECORDING_ENABLED
13646  if(allocator->GetRecorder() != VMA_NULL)
13647  {
13648  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13649  }
13650 #endif
13651 
13652  allocator->DestroyPool(pool);
13653 }
13654 
13655 void vmaGetPoolStats(
13656  VmaAllocator allocator,
13657  VmaPool pool,
13658  VmaPoolStats* pPoolStats)
13659 {
13660  VMA_ASSERT(allocator && pool && pPoolStats);
13661 
13662  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13663 
13664  allocator->GetPoolStats(pool, pPoolStats);
13665 }
13666 
13667 void vmaMakePoolAllocationsLost(
13668  VmaAllocator allocator,
13669  VmaPool pool,
13670  size_t* pLostAllocationCount)
13671 {
13672  VMA_ASSERT(allocator && pool);
13673 
13674  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13675 
13676 #if VMA_RECORDING_ENABLED
13677  if(allocator->GetRecorder() != VMA_NULL)
13678  {
13679  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13680  }
13681 #endif
13682 
13683  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13684 }
13685 
13686 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13687 {
13688  VMA_ASSERT(allocator && pool);
13689 
13690  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13691 
13692  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13693 
13694  return allocator->CheckPoolCorruption(pool);
13695 }
13696 
13697 VkResult vmaAllocateMemory(
13698  VmaAllocator allocator,
13699  const VkMemoryRequirements* pVkMemoryRequirements,
13700  const VmaAllocationCreateInfo* pCreateInfo,
13701  VmaAllocation* pAllocation,
13702  VmaAllocationInfo* pAllocationInfo)
13703 {
13704  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13705 
13706  VMA_DEBUG_LOG("vmaAllocateMemory");
13707 
13708  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13709 
13710  VkResult result = allocator->AllocateMemory(
13711  *pVkMemoryRequirements,
13712  false, // requiresDedicatedAllocation
13713  false, // prefersDedicatedAllocation
13714  VK_NULL_HANDLE, // dedicatedBuffer
13715  VK_NULL_HANDLE, // dedicatedImage
13716  *pCreateInfo,
13717  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13718  pAllocation);
13719 
13720 #if VMA_RECORDING_ENABLED
13721  if(allocator->GetRecorder() != VMA_NULL)
13722  {
13723  allocator->GetRecorder()->RecordAllocateMemory(
13724  allocator->GetCurrentFrameIndex(),
13725  *pVkMemoryRequirements,
13726  *pCreateInfo,
13727  *pAllocation);
13728  }
13729 #endif
13730 
13731  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13732  {
13733  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13734  }
13735 
13736  return result;
13737 }
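// A minimal usage sketch (illustrative): allocating raw memory for
// requirements obtained elsewhere. For buffers and images, the functions
// vmaAllocateMemoryForBuffer()/vmaAllocateMemoryForImage() below, or
// vmaCreateBuffer()/vmaCreateImage(), are usually more convenient. The helper
// name is hypothetical.
static VkResult ExampleAllocateRawMemory(
    VmaAllocator allocator, const VkMemoryRequirements* pMemReq, VmaAllocation* pAllocation)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // Free later with vmaFreeMemory(allocator, *pAllocation).
    return vmaAllocateMemory(allocator, pMemReq, &allocCreateInfo, pAllocation, VMA_NULL);
}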
13738 
13739 VkResult vmaAllocateMemoryForBuffer(
13740  VmaAllocator allocator,
13741  VkBuffer buffer,
13742  const VmaAllocationCreateInfo* pCreateInfo,
13743  VmaAllocation* pAllocation,
13744  VmaAllocationInfo* pAllocationInfo)
13745 {
13746  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13747 
13748  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
13749 
13750  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13751 
13752  VkMemoryRequirements vkMemReq = {};
13753  bool requiresDedicatedAllocation = false;
13754  bool prefersDedicatedAllocation = false;
13755  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
13756  requiresDedicatedAllocation,
13757  prefersDedicatedAllocation);
13758 
13759  VkResult result = allocator->AllocateMemory(
13760  vkMemReq,
13761  requiresDedicatedAllocation,
13762  prefersDedicatedAllocation,
13763  buffer, // dedicatedBuffer
13764  VK_NULL_HANDLE, // dedicatedImage
13765  *pCreateInfo,
13766  VMA_SUBALLOCATION_TYPE_BUFFER,
13767  pAllocation);
13768 
13769 #if VMA_RECORDING_ENABLED
13770  if(allocator->GetRecorder() != VMA_NULL)
13771  {
13772  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
13773  allocator->GetCurrentFrameIndex(),
13774  vkMemReq,
13775  requiresDedicatedAllocation,
13776  prefersDedicatedAllocation,
13777  *pCreateInfo,
13778  *pAllocation);
13779  }
13780 #endif
13781 
13782  if(pAllocationInfo && result == VK_SUCCESS)
13783  {
13784  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13785  }
13786 
13787  return result;
13788 }
13789 
13790 VkResult vmaAllocateMemoryForImage(
13791  VmaAllocator allocator,
13792  VkImage image,
13793  const VmaAllocationCreateInfo* pCreateInfo,
13794  VmaAllocation* pAllocation,
13795  VmaAllocationInfo* pAllocationInfo)
13796 {
13797  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13798 
13799  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
13800 
13801  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13802 
13803  VkMemoryRequirements vkMemReq = {};
13804  bool requiresDedicatedAllocation = false;
13805  bool prefersDedicatedAllocation = false;
13806  allocator->GetImageMemoryRequirements(image, vkMemReq,
13807  requiresDedicatedAllocation, prefersDedicatedAllocation);
13808 
13809  VkResult result = allocator->AllocateMemory(
13810  vkMemReq,
13811  requiresDedicatedAllocation,
13812  prefersDedicatedAllocation,
13813  VK_NULL_HANDLE, // dedicatedBuffer
13814  image, // dedicatedImage
13815  *pCreateInfo,
13816  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
13817  pAllocation);
13818 
13819 #if VMA_RECORDING_ENABLED
13820  if(allocator->GetRecorder() != VMA_NULL)
13821  {
13822  allocator->GetRecorder()->RecordAllocateMemoryForImage(
13823  allocator->GetCurrentFrameIndex(),
13824  vkMemReq,
13825  requiresDedicatedAllocation,
13826  prefersDedicatedAllocation,
13827  *pCreateInfo,
13828  *pAllocation);
13829  }
13830 #endif
13831 
13832  if(pAllocationInfo && result == VK_SUCCESS)
13833  {
13834  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13835  }
13836 
13837  return result;
13838 }
13839 
13840 void vmaFreeMemory(
13841  VmaAllocator allocator,
13842  VmaAllocation allocation)
13843 {
13844  VMA_ASSERT(allocator);
13845 
13846  if(allocation == VK_NULL_HANDLE)
13847  {
13848  return;
13849  }
13850 
13851  VMA_DEBUG_LOG("vmaFreeMemory");
13852 
13853  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13854 
13855 #if VMA_RECORDING_ENABLED
13856  if(allocator->GetRecorder() != VMA_NULL)
13857  {
13858  allocator->GetRecorder()->RecordFreeMemory(
13859  allocator->GetCurrentFrameIndex(),
13860  allocation);
13861  }
13862 #endif
13863 
13864  allocator->FreeMemory(allocation);
13865 }
13866 
13867 void vmaGetAllocationInfo(
13868  VmaAllocator allocator,
13869  VmaAllocation allocation,
13870  VmaAllocationInfo* pAllocationInfo)
13871 {
13872  VMA_ASSERT(allocator && allocation && pAllocationInfo);
13873 
13874  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13875 
13876 #if VMA_RECORDING_ENABLED
13877  if(allocator->GetRecorder() != VMA_NULL)
13878  {
13879  allocator->GetRecorder()->RecordGetAllocationInfo(
13880  allocator->GetCurrentFrameIndex(),
13881  allocation);
13882  }
13883 #endif
13884 
13885  allocator->GetAllocationInfo(allocation, pAllocationInfo);
13886 }
13887 
13888 VkBool32 vmaTouchAllocation(
13889  VmaAllocator allocator,
13890  VmaAllocation allocation)
13891 {
13892  VMA_ASSERT(allocator && allocation);
13893 
13894  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13895 
13896 #if VMA_RECORDING_ENABLED
13897  if(allocator->GetRecorder() != VMA_NULL)
13898  {
13899  allocator->GetRecorder()->RecordTouchAllocation(
13900  allocator->GetCurrentFrameIndex(),
13901  allocation);
13902  }
13903 #endif
13904 
13905  return allocator->TouchAllocation(allocation);
13906 }
13907 
13908 void vmaSetAllocationUserData(
13909  VmaAllocator allocator,
13910  VmaAllocation allocation,
13911  void* pUserData)
13912 {
13913  VMA_ASSERT(allocator && allocation);
13914 
13915  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13916 
13917  allocation->SetUserData(allocator, pUserData);
13918 
13919 #if VMA_RECORDING_ENABLED
13920  if(allocator->GetRecorder() != VMA_NULL)
13921  {
13922  allocator->GetRecorder()->RecordSetAllocationUserData(
13923  allocator->GetCurrentFrameIndex(),
13924  allocation,
13925  pUserData);
13926  }
13927 #endif
13928 }
13929 
13930 void vmaCreateLostAllocation(
13931  VmaAllocator allocator,
13932  VmaAllocation* pAllocation)
13933 {
13934  VMA_ASSERT(allocator && pAllocation);
13935 
13936  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
13937 
13938  allocator->CreateLostAllocation(pAllocation);
13939 
13940 #if VMA_RECORDING_ENABLED
13941  if(allocator->GetRecorder() != VMA_NULL)
13942  {
13943  allocator->GetRecorder()->RecordCreateLostAllocation(
13944  allocator->GetCurrentFrameIndex(),
13945  *pAllocation);
13946  }
13947 #endif
13948 }
13949 
13950 VkResult vmaMapMemory(
13951  VmaAllocator allocator,
13952  VmaAllocation allocation,
13953  void** ppData)
13954 {
13955  VMA_ASSERT(allocator && allocation && ppData);
13956 
13957  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13958 
13959  VkResult res = allocator->Map(allocation, ppData);
13960 
13961 #if VMA_RECORDING_ENABLED
13962  if(allocator->GetRecorder() != VMA_NULL)
13963  {
13964  allocator->GetRecorder()->RecordMapMemory(
13965  allocator->GetCurrentFrameIndex(),
13966  allocation);
13967  }
13968 #endif
13969 
13970  return res;
13971 }
13972 
13973 void vmaUnmapMemory(
13974  VmaAllocator allocator,
13975  VmaAllocation allocation)
13976 {
13977  VMA_ASSERT(allocator && allocation);
13978 
13979  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13980 
13981 #if VMA_RECORDING_ENABLED
13982  if(allocator->GetRecorder() != VMA_NULL)
13983  {
13984  allocator->GetRecorder()->RecordUnmapMemory(
13985  allocator->GetCurrentFrameIndex(),
13986  allocation);
13987  }
13988 #endif
13989 
13990  allocator->Unmap(allocation);
13991 }
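// A minimal usage sketch (illustrative): a map/write/unmap round trip. The
// allocation is assumed to live in HOST_VISIBLE memory and not to use the
// "lost allocation" feature; otherwise Map() above fails with
// VK_ERROR_MEMORY_MAP_FAILED. The helper name and parameters are hypothetical.
static VkResult ExampleUploadData(
    VmaAllocator allocator, VmaAllocation allocation, const void* srcData, size_t srcSize)
{
    void* mappedData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcSize);
        vmaUnmapMemory(allocator, allocation);
    }
    return res;
}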
13992 
13993 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13994 {
13995  VMA_ASSERT(allocator && allocation);
13996 
13997  VMA_DEBUG_LOG("vmaFlushAllocation");
13998 
13999  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14000 
14001  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
14002 
14003 #if VMA_RECORDING_ENABLED
14004  if(allocator->GetRecorder() != VMA_NULL)
14005  {
14006  allocator->GetRecorder()->RecordFlushAllocation(
14007  allocator->GetCurrentFrameIndex(),
14008  allocation, offset, size);
14009  }
14010 #endif
14011 }
14012 
14013 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14014 {
14015  VMA_ASSERT(allocator && allocation);
14016 
14017  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14018 
14019  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14020 
14021  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14022 
14023 #if VMA_RECORDING_ENABLED
14024  if(allocator->GetRecorder() != VMA_NULL)
14025  {
14026  allocator->GetRecorder()->RecordInvalidateAllocation(
14027  allocator->GetCurrentFrameIndex(),
14028  allocation, offset, size);
14029  }
14030 #endif
14031 }
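// A usage sketch (illustrative): flush and invalidate matter only for memory
// that is HOST_VISIBLE but not HOST_COHERENT; for coherent types the calls
// above reduce to no-ops inside FlushOrInvalidateAllocation(). The helper name
// is hypothetical.
static void ExampleFlushAndInvalidate(VmaAllocator allocator, VmaAllocation allocation)
{
    // After writing through a mapped pointer:
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    // Before reading back data the GPU has written:
    vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
}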
14032 
14033 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14034 {
14035  VMA_ASSERT(allocator);
14036 
14037  VMA_DEBUG_LOG("vmaCheckCorruption");
14038 
14039  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14040 
14041  return allocator->CheckCorruption(memoryTypeBits);
14042 }
14043 
14044 VkResult vmaDefragment(
14045  VmaAllocator allocator,
14046  VmaAllocation* pAllocations,
14047  size_t allocationCount,
14048  VkBool32* pAllocationsChanged,
14049  const VmaDefragmentationInfo *pDefragmentationInfo,
14050  VmaDefragmentationStats* pDefragmentationStats)
14051 {
14052  VMA_ASSERT(allocator && pAllocations);
14053 
14054  VMA_DEBUG_LOG("vmaDefragment");
14055 
14056  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14057 
14058  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14059 }
14060 
14061 VkResult vmaBindBufferMemory(
14062  VmaAllocator allocator,
14063  VmaAllocation allocation,
14064  VkBuffer buffer)
14065 {
14066  VMA_ASSERT(allocator && allocation && buffer);
14067 
14068  VMA_DEBUG_LOG("vmaBindBufferMemory");
14069 
14070  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14071 
14072  return allocator->BindBufferMemory(allocation, buffer);
14073 }
14074 
14075 VkResult vmaBindImageMemory(
14076  VmaAllocator allocator,
14077  VmaAllocation allocation,
14078  VkImage image)
14079 {
14080  VMA_ASSERT(allocator && allocation && image);
14081 
14082  VMA_DEBUG_LOG("vmaBindImageMemory");
14083 
14084  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14085 
14086  return allocator->BindImageMemory(allocation, image);
14087 }
14088 
14089 VkResult vmaCreateBuffer(
14090  VmaAllocator allocator,
14091  const VkBufferCreateInfo* pBufferCreateInfo,
14092  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14093  VkBuffer* pBuffer,
14094  VmaAllocation* pAllocation,
14095  VmaAllocationInfo* pAllocationInfo)
14096 {
14097  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14098 
14099  VMA_DEBUG_LOG("vmaCreateBuffer");
14100 
14101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14102 
14103  *pBuffer = VK_NULL_HANDLE;
14104  *pAllocation = VK_NULL_HANDLE;
14105 
14106  // 1. Create VkBuffer.
14107  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14108  allocator->m_hDevice,
14109  pBufferCreateInfo,
14110  allocator->GetAllocationCallbacks(),
14111  pBuffer);
14112  if(res >= 0)
14113  {
14114  // 2. vkGetBufferMemoryRequirements.
14115  VkMemoryRequirements vkMemReq = {};
14116  bool requiresDedicatedAllocation = false;
14117  bool prefersDedicatedAllocation = false;
14118  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14119  requiresDedicatedAllocation, prefersDedicatedAllocation);
14120 
14121  // Make sure alignment requirements for specific buffer usages reported
14122  // in Physical Device Properties are included in the alignment reported by the memory requirements.
14123  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14124  {
14125  VMA_ASSERT(vkMemReq.alignment %
14126  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14127  }
14128  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14129  {
14130  VMA_ASSERT(vkMemReq.alignment %
14131  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14132  }
14133  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14134  {
14135  VMA_ASSERT(vkMemReq.alignment %
14136  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14137  }
14138 
14139  // 3. Allocate memory using allocator.
14140  res = allocator->AllocateMemory(
14141  vkMemReq,
14142  requiresDedicatedAllocation,
14143  prefersDedicatedAllocation,
14144  *pBuffer, // dedicatedBuffer
14145  VK_NULL_HANDLE, // dedicatedImage
14146  *pAllocationCreateInfo,
14147  VMA_SUBALLOCATION_TYPE_BUFFER,
14148  pAllocation);
14149 
14150 #if VMA_RECORDING_ENABLED
14151  if(allocator->GetRecorder() != VMA_NULL)
14152  {
14153  allocator->GetRecorder()->RecordCreateBuffer(
14154  allocator->GetCurrentFrameIndex(),
14155  *pBufferCreateInfo,
14156  *pAllocationCreateInfo,
14157  *pAllocation);
14158  }
14159 #endif
14160 
14161  if(res >= 0)
14162  {
14163  // 4. Bind buffer with memory.
14164  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14165  if(res >= 0)
14166  {
14167  // All steps succeeded.
14168  #if VMA_STATS_STRING_ENABLED
14169  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14170  #endif
14171  if(pAllocationInfo != VMA_NULL)
14172  {
14173  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14174  }
14175 
14176  return VK_SUCCESS;
14177  }
14178  allocator->FreeMemory(*pAllocation);
14179  *pAllocation = VK_NULL_HANDLE;
14180  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14181  *pBuffer = VK_NULL_HANDLE;
14182  return res;
14183  }
14184  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14185  *pBuffer = VK_NULL_HANDLE;
14186  return res;
14187  }
14188  return res;
14189 }
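// A minimal usage sketch (illustrative, not part of the library): creating a
// device-local vertex buffer together with its memory in one call. The helper
// name and the 64 KiB size are hypothetical.
static VkResult ExampleCreateVertexBuffer(VmaAllocator allocator, VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    // On success the buffer is already bound to its memory; release both later
    // with vmaDestroyBuffer(allocator, *pBuffer, *pAllocation).
    return vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, pBuffer, pAllocation, VMA_NULL);
}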
14190 
14191 void vmaDestroyBuffer(
14192  VmaAllocator allocator,
14193  VkBuffer buffer,
14194  VmaAllocation allocation)
14195 {
14196  VMA_ASSERT(allocator);
14197 
14198  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14199  {
14200  return;
14201  }
14202 
14203  VMA_DEBUG_LOG("vmaDestroyBuffer");
14204 
14205  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14206 
14207 #if VMA_RECORDING_ENABLED
14208  if(allocator->GetRecorder() != VMA_NULL)
14209  {
14210  allocator->GetRecorder()->RecordDestroyBuffer(
14211  allocator->GetCurrentFrameIndex(),
14212  allocation);
14213  }
14214 #endif
14215 
14216  if(buffer != VK_NULL_HANDLE)
14217  {
14218  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14219  }
14220 
14221  if(allocation != VK_NULL_HANDLE)
14222  {
14223  allocator->FreeMemory(allocation);
14224  }
14225 }
14226 
14227 VkResult vmaCreateImage(
14228  VmaAllocator allocator,
14229  const VkImageCreateInfo* pImageCreateInfo,
14230  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14231  VkImage* pImage,
14232  VmaAllocation* pAllocation,
14233  VmaAllocationInfo* pAllocationInfo)
14234 {
14235  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14236 
14237  VMA_DEBUG_LOG("vmaCreateImage");
14238 
14239  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14240 
14241  *pImage = VK_NULL_HANDLE;
14242  *pAllocation = VK_NULL_HANDLE;
14243 
14244  // 1. Create VkImage.
14245  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14246  allocator->m_hDevice,
14247  pImageCreateInfo,
14248  allocator->GetAllocationCallbacks(),
14249  pImage);
14250  if(res >= 0)
14251  {
14252  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14253  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14254  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14255 
14256  // 2. Allocate memory using allocator.
14257  VkMemoryRequirements vkMemReq = {};
14258  bool requiresDedicatedAllocation = false;
14259  bool prefersDedicatedAllocation = false;
14260  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14261  requiresDedicatedAllocation, prefersDedicatedAllocation);
14262 
14263  res = allocator->AllocateMemory(
14264  vkMemReq,
14265  requiresDedicatedAllocation,
14266  prefersDedicatedAllocation,
14267  VK_NULL_HANDLE, // dedicatedBuffer
14268  *pImage, // dedicatedImage
14269  *pAllocationCreateInfo,
14270  suballocType,
14271  pAllocation);
14272 
14273 #if VMA_RECORDING_ENABLED
14274  if(allocator->GetRecorder() != VMA_NULL)
14275  {
14276  allocator->GetRecorder()->RecordCreateImage(
14277  allocator->GetCurrentFrameIndex(),
14278  *pImageCreateInfo,
14279  *pAllocationCreateInfo,
14280  *pAllocation);
14281  }
14282 #endif
14283 
14284  if(res >= 0)
14285  {
14286  // 3. Bind image with memory.
14287  res = allocator->BindImageMemory(*pAllocation, *pImage);
14288  if(res >= 0)
14289  {
14290  // All steps succeeded.
14291  #if VMA_STATS_STRING_ENABLED
14292  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14293  #endif
14294  if(pAllocationInfo != VMA_NULL)
14295  {
14296  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14297  }
14298 
14299  return VK_SUCCESS;
14300  }
14301  allocator->FreeMemory(*pAllocation);
14302  *pAllocation = VK_NULL_HANDLE;
14303  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14304  *pImage = VK_NULL_HANDLE;
14305  return res;
14306  }
14307  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14308  *pImage = VK_NULL_HANDLE;
14309  return res;
14310  }
14311  return res;
14312 }
14313 
14314 void vmaDestroyImage(
14315  VmaAllocator allocator,
14316  VkImage image,
14317  VmaAllocation allocation)
14318 {
14319  VMA_ASSERT(allocator);
14320 
14321  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14322  {
14323  return;
14324  }
14325 
14326  VMA_DEBUG_LOG("vmaDestroyImage");
14327 
14328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14329 
14330 #if VMA_RECORDING_ENABLED
14331  if(allocator->GetRecorder() != VMA_NULL)
14332  {
14333  allocator->GetRecorder()->RecordDestroyImage(
14334  allocator->GetCurrentFrameIndex(),
14335  allocation);
14336  }
14337 #endif
14338 
14339  if(image != VK_NULL_HANDLE)
14340  {
14341  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14342  }
14343  if(allocation != VK_NULL_HANDLE)
14344  {
14345  allocator->FreeMemory(allocation);
14346  }
14347 }
14348 
14349 #endif // #ifdef VMA_IMPLEMENTATION
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1477 /*
1478 Define this macro to 0/1 to disable/enable support for recording functionality,
1479 available through VmaAllocatorCreateInfo::pRecordSettings.
1480 */
1481 #ifndef VMA_RECORDING_ENABLED
1482  #ifdef _WIN32
1483  #define VMA_RECORDING_ENABLED 1
1484  #else
1485  #define VMA_RECORDING_ENABLED 0
1486  #endif
1487 #endif
1488 
1489 #ifndef NOMINMAX
1490  #define NOMINMAX // For windows.h
1491 #endif
1492 
1493 #include <vulkan/vulkan.h>
1494 
1495 #if VMA_RECORDING_ENABLED
1496  #include <windows.h>
1497 #endif
1498 
1499 #if !defined(VMA_DEDICATED_ALLOCATION)
1500  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1501  #define VMA_DEDICATED_ALLOCATION 1
1502  #else
1503  #define VMA_DEDICATED_ALLOCATION 0
1504  #endif
1505 #endif
1506 
1516 VK_DEFINE_HANDLE(VmaAllocator)
1517 
1518 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1520  VmaAllocator allocator,
1521  uint32_t memoryType,
1522  VkDeviceMemory memory,
1523  VkDeviceSize size);
1525 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1526  VmaAllocator allocator,
1527  uint32_t memoryType,
1528  VkDeviceMemory memory,
1529  VkDeviceSize size);
1530 
1544 
1574 
1577 typedef VkFlags VmaAllocatorCreateFlags;
1578 
1583 typedef struct VmaVulkanFunctions {
1584  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1585  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1586  PFN_vkAllocateMemory vkAllocateMemory;
1587  PFN_vkFreeMemory vkFreeMemory;
1588  PFN_vkMapMemory vkMapMemory;
1589  PFN_vkUnmapMemory vkUnmapMemory;
1590  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1591  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1592  PFN_vkBindBufferMemory vkBindBufferMemory;
1593  PFN_vkBindImageMemory vkBindImageMemory;
1594  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1595  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1596  PFN_vkCreateBuffer vkCreateBuffer;
1597  PFN_vkDestroyBuffer vkDestroyBuffer;
1598  PFN_vkCreateImage vkCreateImage;
1599  PFN_vkDestroyImage vkDestroyImage;
1600 #if VMA_DEDICATED_ALLOCATION
1601  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1602  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1603 #endif
1604 } VmaVulkanFunctions;
1605 
1607 typedef enum VmaRecordFlagBits {
1613 } VmaRecordFlagBits;
1614 
1617 typedef VkFlags VmaRecordFlags;
1618 
1620 typedef struct VmaRecordSettings
1621 {
1631  const char* pFilePath;
1632 } VmaRecordSettings;
1633 
1635 typedef struct VmaAllocatorCreateInfo
1636 {
1640 
1641  VkPhysicalDevice physicalDevice;
1643 
1644  VkDevice device;
1646 
1649 
1650  const VkAllocationCallbacks* pAllocationCallbacks;
1652 
1691  const VkDeviceSize* pHeapSizeLimit;
1710  const VmaRecordSettings* pRecordSettings;
1711 } VmaAllocatorCreateInfo;
1712 
1714 VkResult vmaCreateAllocator(
1715  const VmaAllocatorCreateInfo* pCreateInfo,
1716  VmaAllocator* pAllocator);
1717 
1719 void vmaDestroyAllocator(
1720  VmaAllocator allocator);
1721 
1726 void vmaGetPhysicalDeviceProperties(
1727  VmaAllocator allocator,
1728  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1729 
1734 void vmaGetMemoryProperties(
1735  VmaAllocator allocator,
1736  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1737 
1744 void vmaGetMemoryTypeProperties(
1745  VmaAllocator allocator,
1746  uint32_t memoryTypeIndex,
1747  VkMemoryPropertyFlags* pFlags);
1748 
1757 void vmaSetCurrentFrameIndex(
1758  VmaAllocator allocator,
1759  uint32_t frameIndex);
1760 
1763 typedef struct VmaStatInfo
1764 {
1766  uint32_t blockCount;
1772  VkDeviceSize usedBytes;
1774  VkDeviceSize unusedBytes;
1777 } VmaStatInfo;
1778 
1780 typedef struct VmaStats
1781 {
1782  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1783  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1785 } VmaStats;
1786 
1788 void vmaCalculateStats(
1789  VmaAllocator allocator,
1790  VmaStats* pStats);
1791 
1792 #define VMA_STATS_STRING_ENABLED 1
1793 
1794 #if VMA_STATS_STRING_ENABLED
1795 
1797 
1799 void vmaBuildStatsString(
1800  VmaAllocator allocator,
1801  char** ppStatsString,
1802  VkBool32 detailedMap);
1803 
1804 void vmaFreeStatsString(
1805  VmaAllocator allocator,
1806  char* pStatsString);
1807 
1808 #endif // #if VMA_STATS_STRING_ENABLED
1809 
1818 VK_DEFINE_HANDLE(VmaPool)
1819 
1820 typedef enum VmaMemoryUsage
1821 {
1870 } VmaMemoryUsage;
1871 
1886 
1941 
1954 
1964 
1971 
1975 
1976 typedef struct VmaAllocationCreateInfo
1977 {
1990  VkMemoryPropertyFlags requiredFlags;
1995  VkMemoryPropertyFlags preferredFlags;
2003  uint32_t memoryTypeBits;
2016  void* pUserData;
2017 } VmaAllocationCreateInfo;
2018 
2035 VkResult vmaFindMemoryTypeIndex(
2036  VmaAllocator allocator,
2037  uint32_t memoryTypeBits,
2038  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2039  uint32_t* pMemoryTypeIndex);
2040 
2053 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2054  VmaAllocator allocator,
2055  const VkBufferCreateInfo* pBufferCreateInfo,
2056  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2057  uint32_t* pMemoryTypeIndex);
2058 
2071 VkResult vmaFindMemoryTypeIndexForImageInfo(
2072  VmaAllocator allocator,
2073  const VkImageCreateInfo* pImageCreateInfo,
2074  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2075  uint32_t* pMemoryTypeIndex);
2076 
2097 
2114 
2125 
2131 
2134 typedef VkFlags VmaPoolCreateFlags;
2135 
2138 typedef struct VmaPoolCreateInfo {
2153  VkDeviceSize blockSize;
2181 } VmaPoolCreateInfo;
2182 
2185 typedef struct VmaPoolStats {
2188  VkDeviceSize size;
2191  VkDeviceSize unusedSize;
2204  VkDeviceSize unusedRangeSizeMax;
2207  size_t blockCount;
2208 } VmaPoolStats;
2209 
2216 VkResult vmaCreatePool(
2217  VmaAllocator allocator,
2218  const VmaPoolCreateInfo* pCreateInfo,
2219  VmaPool* pPool);
2220 
2223 void vmaDestroyPool(
2224  VmaAllocator allocator,
2225  VmaPool pool);
2226 
2233 void vmaGetPoolStats(
2234  VmaAllocator allocator,
2235  VmaPool pool,
2236  VmaPoolStats* pPoolStats);
2237 
2244 void vmaMakePoolAllocationsLost(
2245  VmaAllocator allocator,
2246  VmaPool pool,
2247  size_t* pLostAllocationCount);
2248 
2263 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2264 
2289 VK_DEFINE_HANDLE(VmaAllocation)
2290 
2291 
2293 typedef struct VmaAllocationInfo {
2298  uint32_t memoryType;
2307  VkDeviceMemory deviceMemory;
2312  VkDeviceSize offset;
2317  VkDeviceSize size;
2331  void* pUserData;
2332 } VmaAllocationInfo;
2333 
2344 VkResult vmaAllocateMemory(
2345  VmaAllocator allocator,
2346  const VkMemoryRequirements* pVkMemoryRequirements,
2347  const VmaAllocationCreateInfo* pCreateInfo,
2348  VmaAllocation* pAllocation,
2349  VmaAllocationInfo* pAllocationInfo);
2350 
2357 VkResult vmaAllocateMemoryForBuffer(
2358  VmaAllocator allocator,
2359  VkBuffer buffer,
2360  const VmaAllocationCreateInfo* pCreateInfo,
2361  VmaAllocation* pAllocation,
2362  VmaAllocationInfo* pAllocationInfo);
2363 
2365 VkResult vmaAllocateMemoryForImage(
2366  VmaAllocator allocator,
2367  VkImage image,
2368  const VmaAllocationCreateInfo* pCreateInfo,
2369  VmaAllocation* pAllocation,
2370  VmaAllocationInfo* pAllocationInfo);
2371 
2373 void vmaFreeMemory(
2374  VmaAllocator allocator,
2375  VmaAllocation allocation);
2376 
2393 void vmaGetAllocationInfo(
2394  VmaAllocator allocator,
2395  VmaAllocation allocation,
2396  VmaAllocationInfo* pAllocationInfo);
2397 
2412 VkBool32 vmaTouchAllocation(
2413  VmaAllocator allocator,
2414  VmaAllocation allocation);
2415 
2429 void vmaSetAllocationUserData(
2430  VmaAllocator allocator,
2431  VmaAllocation allocation,
2432  void* pUserData);
2433 
2444 void vmaCreateLostAllocation(
2445  VmaAllocator allocator,
2446  VmaAllocation* pAllocation);
2447 
2482 VkResult vmaMapMemory(
2483  VmaAllocator allocator,
2484  VmaAllocation allocation,
2485  void** ppData);
2486 
2491 void vmaUnmapMemory(
2492  VmaAllocator allocator,
2493  VmaAllocation allocation);
2494 
2507 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2508 
2521 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2522 
2539 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2540 
2542 typedef struct VmaDefragmentationInfo {
2547  VkDeviceSize maxBytesToMove;
2552  uint32_t maxAllocationsToMove;
2553 } VmaDefragmentationInfo;
2554 
2556 typedef struct VmaDefragmentationStats {
2558  VkDeviceSize bytesMoved;
2560  VkDeviceSize bytesFreed;
2565 } VmaDefragmentationStats;
2566 
2605 VkResult vmaDefragment(
2606  VmaAllocator allocator,
2607  VmaAllocation* pAllocations,
2608  size_t allocationCount,
2609  VkBool32* pAllocationsChanged,
2610  const VmaDefragmentationInfo *pDefragmentationInfo,
2611  VmaDefragmentationStats* pDefragmentationStats);
2612 
2625 VkResult vmaBindBufferMemory(
2626  VmaAllocator allocator,
2627  VmaAllocation allocation,
2628  VkBuffer buffer);
2629 
2642 VkResult vmaBindImageMemory(
2643  VmaAllocator allocator,
2644  VmaAllocation allocation,
2645  VkImage image);
2646 
2673 VkResult vmaCreateBuffer(
2674  VmaAllocator allocator,
2675  const VkBufferCreateInfo* pBufferCreateInfo,
2676  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2677  VkBuffer* pBuffer,
2678  VmaAllocation* pAllocation,
2679  VmaAllocationInfo* pAllocationInfo);
2680 
2692 void vmaDestroyBuffer(
2693  VmaAllocator allocator,
2694  VkBuffer buffer,
2695  VmaAllocation allocation);
2696 
2698 VkResult vmaCreateImage(
2699  VmaAllocator allocator,
2700  const VkImageCreateInfo* pImageCreateInfo,
2701  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2702  VkImage* pImage,
2703  VmaAllocation* pAllocation,
2704  VmaAllocationInfo* pAllocationInfo);
2705 
2717 void vmaDestroyImage(
2718  VmaAllocator allocator,
2719  VkImage image,
2720  VmaAllocation allocation);
2721 
2722 #ifdef __cplusplus
2723 }
2724 #endif
2725 
2726 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2727 
2728 // For Visual Studio IntelliSense.
2729 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2730 #define VMA_IMPLEMENTATION
2731 #endif
2732 
2733 #ifdef VMA_IMPLEMENTATION
2734 #undef VMA_IMPLEMENTATION
2735 
2736 #include <cstdint>
2737 #include <cstdlib>
2738 #include <cstring>
2739 
2740 /*******************************************************************************
2741 CONFIGURATION SECTION
2742 
2743 Define some of these macros before each #include of this header, or change them
2744 here, if you need behavior other than the default for your environment.
2745 */
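// For example (illustrative), the implementation is typically compiled into
// exactly one translation unit, with any configuration overrides defined
// before the #include:
//
//     #define VMA_STATIC_VULKAN_FUNCTIONS 0 // optional override
//     #define VMA_IMPLEMENTATION
//     #include "vk_mem_alloc.h"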
2746 
2747 /*
2748 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2749 internally, like:
2750 
2751  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2752 
2753 Define it to 0 if you are going to provide your own pointers to Vulkan functions via
2754 VmaAllocatorCreateInfo::pVulkanFunctions.
2755 */
2756 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2757 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2758 #endif
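// A sketch of the alternative (illustrative): with VMA_STATIC_VULKAN_FUNCTIONS
// defined to 0, the application fills VmaVulkanFunctions itself, e.g. with
// statically linked functions or with pointers from vkGetDeviceProcAddr(). The
// helper name is hypothetical and assumes Vulkan prototypes are available.
static void ExampleProvideVulkanFunctions(
    VmaVulkanFunctions& vulkanFunctions, VmaAllocatorCreateInfo& allocatorInfo)
{
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    // ... fill the remaining members the same way ...
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
}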
2759 
2760 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2761 //#define VMA_USE_STL_CONTAINERS 1
2762 
2763 /* Set this macro to 1 to make the library include and use STL containers:
2764 std::pair, std::vector, std::list, std::unordered_map.
2765 
2766 Set it to 0 or leave it undefined to make the library use its own implementation
2767 of the containers.
2768 */
2769 #if VMA_USE_STL_CONTAINERS
2770  #define VMA_USE_STL_VECTOR 1
2771  #define VMA_USE_STL_UNORDERED_MAP 1
2772  #define VMA_USE_STL_LIST 1
2773 #endif
2774 
2775 #if VMA_USE_STL_VECTOR
2776  #include <vector>
2777 #endif
2778 
2779 #if VMA_USE_STL_UNORDERED_MAP
2780  #include <unordered_map>
2781 #endif
2782 
2783 #if VMA_USE_STL_LIST
2784  #include <list>
2785 #endif
2786 
2787 /*
2788 The following headers are used in this CONFIGURATION section only, so feel free to
2789 remove them if not needed.
2790 */
2791 #include <cassert> // for assert
2792 #include <algorithm> // for min, max
2793 #include <mutex> // for std::mutex
2794 #include <atomic> // for std::atomic
2795 
2796 #ifndef VMA_NULL
2797  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2798  #define VMA_NULL nullptr
2799 #endif
2800 
2801 #if defined(__APPLE__) || defined(__ANDROID__)
2802 #include <cstdlib>
2803 void *aligned_alloc(size_t alignment, size_t size)
2804 {
2805  // alignment must be >= sizeof(void*)
2806  if(alignment < sizeof(void*))
2807  {
2808  alignment = sizeof(void*);
2809  }
2810 
2811  void *pointer;
2812  if(posix_memalign(&pointer, alignment, size) == 0)
2813  return pointer;
2814  return VMA_NULL;
2815 }
2816 #endif
2817 
2818 // If your compiler is not compatible with C++11 and the definition of the
2819 // aligned_alloc() function is missing, uncommenting the following line may help:
2820 
2821 //#include <malloc.h>
2822 
2823 // Normal assert to check for programmer's errors, especially in Debug configuration.
2824 #ifndef VMA_ASSERT
2825  #ifdef _DEBUG
2826  #define VMA_ASSERT(expr) assert(expr)
2827  #else
2828  #define VMA_ASSERT(expr)
2829  #endif
2830 #endif
2831 
2832 // Assert that will be called very often, e.g. inside data structures such as operator[].
2833 // Making it non-empty can make the program slow.
2834 #ifndef VMA_HEAVY_ASSERT
2835  #ifdef _DEBUG
2836  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2837  #else
2838  #define VMA_HEAVY_ASSERT(expr)
2839  #endif
2840 #endif
2841 
2842 #ifndef VMA_ALIGN_OF
2843  #define VMA_ALIGN_OF(type) (__alignof(type))
2844 #endif
2845 
2846 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2847  #if defined(_WIN32)
2848  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2849  #else
2850  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2851  #endif
2852 #endif
2853 
2854 #ifndef VMA_SYSTEM_FREE
2855  #if defined(_WIN32)
2856  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2857  #else
2858  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2859  #endif
2860 #endif
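// Sketch (an assumption, not from the library): the pair of macros above can
// be redirected to a custom allocator by defining both before this
// implementation is compiled. MyAlignedAlloc/MyAlignedFree are hypothetical.
#if 0
    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) MyAlignedAlloc((size), (alignment))
    #define VMA_SYSTEM_FREE(ptr) MyAlignedFree(ptr)
#endif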
2861 
2862 #ifndef VMA_MIN
2863  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2864 #endif
2865 
2866 #ifndef VMA_MAX
2867  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2868 #endif
2869 
2870 #ifndef VMA_SWAP
2871  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2872 #endif
2873 
2874 #ifndef VMA_SORT
2875  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2876 #endif
2877 
2878 #ifndef VMA_DEBUG_LOG
2879  #define VMA_DEBUG_LOG(format, ...)
2880  /*
2881  #define VMA_DEBUG_LOG(format, ...) do { \
2882  printf(format, __VA_ARGS__); \
2883  printf("\n"); \
2884  } while(false)
2885  */
2886 #endif
2887 
2888 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2889 #if VMA_STATS_STRING_ENABLED
2890  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2891  {
2892  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2893  }
2894  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2895  {
2896  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2897  }
2898  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2899  {
2900  snprintf(outStr, strLen, "%p", ptr);
2901  }
2902 #endif
2903 
2904 #ifndef VMA_MUTEX
2905  class VmaMutex
2906  {
2907  public:
2908  VmaMutex() { }
2909  ~VmaMutex() { }
2910  void Lock() { m_Mutex.lock(); }
2911  void Unlock() { m_Mutex.unlock(); }
2912  private:
2913  std::mutex m_Mutex;
2914  };
2915  #define VMA_MUTEX VmaMutex
2916 #endif
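// A sketch (assumption, not part of the library) of replacing VMA_MUTEX with
// a spinlock that satisfies the same Lock()/Unlock() interface:
#if 0
    class MySpinLock
    {
    public:
        void Lock() { while(m_Flag.test_and_set(std::memory_order_acquire)) { } }
        void Unlock() { m_Flag.clear(std::memory_order_release); }
    private:
        std::atomic_flag m_Flag = ATOMIC_FLAG_INIT;
    };
    #define VMA_MUTEX MySpinLock
#endif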
2917 
2918 /*
2919 If providing your own implementation, you need to implement a subset of std::atomic:
2920 
2921 - Constructor(uint32_t desired)
2922 - uint32_t load() const
2923 - void store(uint32_t desired)
2924 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2925 */
2926 #ifndef VMA_ATOMIC_UINT32
2927  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2928 #endif
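// Sketch (assumption): a minimal single-threaded stand-in implementing the
// subset listed above. Only valid if the allocator is never used concurrently.
#if 0
    class MyFakeAtomicUint32
    {
    public:
        MyFakeAtomicUint32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return m_Value; }
        void store(uint32_t desired) { m_Value = desired; }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
        {
            if(m_Value == expected) { m_Value = desired; return true; }
            expected = m_Value; // Mirror std::atomic: report the current value on failure.
            return false;
        }
    private:
        uint32_t m_Value;
    };
    #define VMA_ATOMIC_UINT32 MyFakeAtomicUint32
#endif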
2929 
2930 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2931 
2935  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2936 #endif
2937 
2938 #ifndef VMA_DEBUG_ALIGNMENT
2939 
2943  #define VMA_DEBUG_ALIGNMENT (1)
2944 #endif
2945 
2946 #ifndef VMA_DEBUG_MARGIN
2947 
2951  #define VMA_DEBUG_MARGIN (0)
2952 #endif
2953 
2954 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2955 
2959  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2960 #endif
2961 
2962 #ifndef VMA_DEBUG_DETECT_CORRUPTION
2963 
2968  #define VMA_DEBUG_DETECT_CORRUPTION (0)
2969 #endif
2970 
2971 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2972 
2976  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2977 #endif
2978 
2979 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2980 
2984  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2985 #endif
2986 
2987 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2988  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2990 #endif
2991 
2992 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2993  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2995 #endif
2996 
2997 #ifndef VMA_CLASS_NO_COPY
2998  #define VMA_CLASS_NO_COPY(className) \
2999  private: \
3000  className(const className&) = delete; \
3001  className& operator=(const className&) = delete;
3002 #endif
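// Note: the macro above also leaves the class in a private access section, so
// classes below that use it (e.g. VmaMutexLock) resume with an explicit
// "public:" afterwards.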
3003 
3004 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3005 
3006 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3007 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3008 
3009 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3010 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3011 
3012 /*******************************************************************************
3013 END OF CONFIGURATION
3014 */
3015 
3016 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3017  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3018 
3019 // Returns number of bits set to 1 in (v).
3020 static inline uint32_t VmaCountBitsSet(uint32_t v)
3021 {
3022  uint32_t c = v - ((v >> 1) & 0x55555555);
3023  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3024  c = ((c >> 4) + c) & 0x0F0F0F0F;
3025  c = ((c >> 8) + c) & 0x00FF00FF;
3026  c = ((c >> 16) + c) & 0x0000FFFF;
3027  return c;
3028 }
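// Worked example: 0x0000000Bu has bits 0, 1 and 3 set, so
// VmaCountBitsSet(0x0000000Bu) returns 3.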
3029 
3030 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3031 // Use types like uint32_t, uint64_t as T.
3032 template <typename T>
3033 static inline T VmaAlignUp(T val, T align)
3034 {
3035  return (val + align - 1) / align * align;
3036 }
3037 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3038 // Use types like uint32_t, uint64_t as T.
3039 template <typename T>
3040 static inline T VmaAlignDown(T val, T align)
3041 {
3042  return val / align * align;
3043 }
3044 
3045 // Division with mathematical rounding to the nearest integer.
3046 template <typename T>
3047 static inline T VmaRoundDiv(T x, T y)
3048 {
3049  return (x + (y / (T)2)) / y;
3050 }
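// Worked examples for the three helpers above:
//   VmaAlignUp<uint32_t>(11, 8)   == 16
//   VmaAlignDown<uint32_t>(11, 8) == 8
//   VmaRoundDiv<uint32_t>(7, 2)   == 4   // (7 + 2/2) / 2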
3051 
3052 /*
3053 Returns true if given number is a power of two.
3054 T must be an unsigned integer, or a signed integer that is always nonnegative.
3055 For 0 returns true.
3056 */
3057 template <typename T>
3058 inline bool VmaIsPow2(T x)
3059 {
3060  return (x & (x-1)) == 0;
3061 }
3062 
3063 // Returns the smallest power of 2 greater than or equal to v.
3064 static inline uint32_t VmaNextPow2(uint32_t v)
3065 {
3066  v--;
3067  v |= v >> 1;
3068  v |= v >> 2;
3069  v |= v >> 4;
3070  v |= v >> 8;
3071  v |= v >> 16;
3072  v++;
3073  return v;
3074 }
3075 static inline uint64_t VmaNextPow2(uint64_t v)
3076 {
3077  v--;
3078  v |= v >> 1;
3079  v |= v >> 2;
3080  v |= v >> 4;
3081  v |= v >> 8;
3082  v |= v >> 16;
3083  v |= v >> 32;
3084  v++;
3085  return v;
3086 }
3087 
3088 // Returns the largest power of 2 less than or equal to v.
3089 static inline uint32_t VmaPrevPow2(uint32_t v)
3090 {
3091  v |= v >> 1;
3092  v |= v >> 2;
3093  v |= v >> 4;
3094  v |= v >> 8;
3095  v |= v >> 16;
3096  v = v ^ (v >> 1);
3097  return v;
3098 }
3099 static inline uint64_t VmaPrevPow2(uint64_t v)
3100 {
3101  v |= v >> 1;
3102  v |= v >> 2;
3103  v |= v >> 4;
3104  v |= v >> 8;
3105  v |= v >> 16;
3106  v |= v >> 32;
3107  v = v ^ (v >> 1);
3108  return v;
3109 }
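// Worked examples: VmaNextPow2(17u) == 32 and VmaPrevPow2(17u) == 16;
// for an exact power of two such as 16, both functions return 16.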
3110 
3111 static inline bool VmaStrIsEmpty(const char* pStr)
3112 {
3113  return pStr == VMA_NULL || *pStr == '\0';
3114 }
3115 
3116 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3117 {
3118  switch(algorithm)
3119  {
3120  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3121  return "Linear";
3122  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3123  return "Buddy";
3124  case 0:
3125  return "Default";
3126  default:
3127  VMA_ASSERT(0);
3128  return "";
3129  }
3130 }
3131 
3132 #ifndef VMA_SORT
3133 
3134 template<typename Iterator, typename Compare>
3135 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3136 {
3137  Iterator centerValue = end; --centerValue;
3138  Iterator insertIndex = beg;
3139  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3140  {
3141  if(cmp(*memTypeIndex, *centerValue))
3142  {
3143  if(insertIndex != memTypeIndex)
3144  {
3145  VMA_SWAP(*memTypeIndex, *insertIndex);
3146  }
3147  ++insertIndex;
3148  }
3149  }
3150  if(insertIndex != centerValue)
3151  {
3152  VMA_SWAP(*insertIndex, *centerValue);
3153  }
3154  return insertIndex;
3155 }
3156 
3157 template<typename Iterator, typename Compare>
3158 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3159 {
3160  if(beg < end)
3161  {
3162  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3163  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3164  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3165  }
3166 }
3167 
3168 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3169 
3170 #endif // #ifndef VMA_SORT
3171 
3172 /*
3173 Returns true if two memory blocks occupy overlapping pages.
3174 ResourceA must be at a lower memory offset than ResourceB.
3175 
3176 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3177 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3178 */
3179 static inline bool VmaBlocksOnSamePage(
3180  VkDeviceSize resourceAOffset,
3181  VkDeviceSize resourceASize,
3182  VkDeviceSize resourceBOffset,
3183  VkDeviceSize pageSize)
3184 {
3185  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3186  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3187  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3188  VkDeviceSize resourceBStart = resourceBOffset;
3189  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3190  return resourceAEndPage == resourceBStartPage;
3191 }
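// Worked example with pageSize = 4096: resource A at offset 0 with size 100
// (last byte 99) and resource B starting at offset 200 share page 0, so the
// function returns true; with resource B starting at offset 4096 it returns false.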
3192 
3193 enum VmaSuballocationType
3194 {
3195  VMA_SUBALLOCATION_TYPE_FREE = 0,
3196  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3197  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3198  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3199  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3200  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3201  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3202 };
3203 
3204 /*
3205 Returns true if given suballocation types could conflict and must respect
3206 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3207 or linear image and the other one is an optimal image. If a type is unknown, the
3208 function behaves conservatively.
3209 */
3210 static inline bool VmaIsBufferImageGranularityConflict(
3211  VmaSuballocationType suballocType1,
3212  VmaSuballocationType suballocType2)
3213 {
3214  if(suballocType1 > suballocType2)
3215  {
3216  VMA_SWAP(suballocType1, suballocType2);
3217  }
3218 
3219  switch(suballocType1)
3220  {
3221  case VMA_SUBALLOCATION_TYPE_FREE:
3222  return false;
3223  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3224  return true;
3225  case VMA_SUBALLOCATION_TYPE_BUFFER:
3226  return
3227  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3228  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3229  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3230  return
3231  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3232  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3233  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3234  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3235  return
3236  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3237  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3238  return false;
3239  default:
3240  VMA_ASSERT(0);
3241  return true;
3242  }
3243 }
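// Example: VmaIsBufferImageGranularityConflict(VMA_SUBALLOCATION_TYPE_BUFFER,
// VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) returns true, so such neighboring
// suballocations must be kept on separate "pages" of size bufferImageGranularity.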
3244 
3245 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3246 {
3247  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3248  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3249  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3250  {
3251  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3252  }
3253 }
3254 
3255 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3256 {
3257  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3258  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3259  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3260  {
3261  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3262  {
3263  return false;
3264  }
3265  }
3266  return true;
3267 }
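// Intended use of the two functions above (as suggested by the definitions):
// with a nonzero VMA_DEBUG_MARGIN, VmaWriteMagicValue fills the margin around
// an allocation with VMA_CORRUPTION_DETECTION_MAGIC_VALUE, and
// VmaValidateMagicValue later detects out-of-bounds writes by checking that
// the pattern survived intact.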
3268 
3269 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3270 struct VmaMutexLock
3271 {
3272  VMA_CLASS_NO_COPY(VmaMutexLock)
3273 public:
3274  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3275  m_pMutex(useMutex ? &mutex : VMA_NULL)
3276  {
3277  if(m_pMutex)
3278  {
3279  m_pMutex->Lock();
3280  }
3281  }
3282 
3283  ~VmaMutexLock()
3284  {
3285  if(m_pMutex)
3286  {
3287  m_pMutex->Unlock();
3288  }
3289  }
3290 
3291 private:
3292  VMA_MUTEX* m_pMutex;
3293 };
3294 
3295 #if VMA_DEBUG_GLOBAL_MUTEX
3296  static VMA_MUTEX gDebugGlobalMutex;
3297  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3298 #else
3299  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3300 #endif
3301 
3302 // Minimum size of a free suballocation to register it in the free suballocation collection.
3303 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3304 
3305 /*
3306 Performs binary search and returns an iterator to the first element that is
3307 greater than or equal to (key), according to comparison (cmp).
3308 
3309 Cmp should return true if its first argument is less than its second argument.
3310 
3311 The returned iterator points to the found element if it is present in the
3312 collection, or to the place where a new element with value (key) should be inserted.
3313 */
3314 template <typename CmpLess, typename IterT, typename KeyT>
3315 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3316 {
3317  size_t down = 0, up = (end - beg);
3318  while(down < up)
3319  {
3320  const size_t mid = (down + up) / 2;
3321  if(cmp(*(beg+mid), key))
3322  {
3323  down = mid + 1;
3324  }
3325  else
3326  {
3327  up = mid;
3328  }
3329  }
3330  return beg + down;
3331 }
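// Usage sketch (assumption): a lower-bound search over a sorted plain array.
#if 0
    uint32_t arr[] = { 1, 3, 3, 7 };
    // Returns a pointer to the first 3 (index 1):
    uint32_t* it = VmaBinaryFindFirstNotLess(arr, arr + 4, 3u,
        [](uint32_t lhs, uint32_t rhs) { return lhs < rhs; });
#endif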
3332 
3334 // Memory allocation
3335 
3336 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3337 {
3338  if((pAllocationCallbacks != VMA_NULL) &&
3339  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3340  {
3341  return (*pAllocationCallbacks->pfnAllocation)(
3342  pAllocationCallbacks->pUserData,
3343  size,
3344  alignment,
3345  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3346  }
3347  else
3348  {
3349  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3350  }
3351 }
3352 
3353 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3354 {
3355  if((pAllocationCallbacks != VMA_NULL) &&
3356  (pAllocationCallbacks->pfnFree != VMA_NULL))
3357  {
3358  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3359  }
3360  else
3361  {
3362  VMA_SYSTEM_FREE(ptr);
3363  }
3364 }
3365 
3366 template<typename T>
3367 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3368 {
3369  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3370 }
3371 
3372 template<typename T>
3373 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3374 {
3375  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3376 }
3377 
3378 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3379 
3380 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3381 
3382 template<typename T>
3383 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3384 {
3385  ptr->~T();
3386  VmaFree(pAllocationCallbacks, ptr);
3387 }
3388 
3389 template<typename T>
3390 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3391 {
3392  if(ptr != VMA_NULL)
3393  {
3394  for(size_t i = count; i--; )
3395  {
3396  ptr[i].~T();
3397  }
3398  VmaFree(pAllocationCallbacks, ptr);
3399  }
3400 }
3401 
3402 // STL-compatible allocator.
3403 template<typename T>
3404 class VmaStlAllocator
3405 {
3406 public:
3407  const VkAllocationCallbacks* const m_pCallbacks;
3408  typedef T value_type;
3409 
3410  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3411  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3412 
3413  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3414  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3415 
3416  template<typename U>
3417  bool operator==(const VmaStlAllocator<U>& rhs) const
3418  {
3419  return m_pCallbacks == rhs.m_pCallbacks;
3420  }
3421  template<typename U>
3422  bool operator!=(const VmaStlAllocator<U>& rhs) const
3423  {
3424  return m_pCallbacks != rhs.m_pCallbacks;
3425  }
3426 
3427  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3428 };
3429 
3430 #if VMA_USE_STL_VECTOR
3431 
3432 #define VmaVector std::vector
3433 
3434 template<typename T, typename allocatorT>
3435 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3436 {
3437  vec.insert(vec.begin() + index, item);
3438 }
3439 
3440 template<typename T, typename allocatorT>
3441 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3442 {
3443  vec.erase(vec.begin() + index);
3444 }
3445 
3446 #else // #if VMA_USE_STL_VECTOR
3447 
3448 /* Class with an interface compatible with a subset of std::vector.
3449 T must be POD because constructors and destructors are not called and memcpy is
3450 used for these objects. */
3451 template<typename T, typename AllocatorT>
3452 class VmaVector
3453 {
3454 public:
3455  typedef T value_type;
3456 
3457  VmaVector(const AllocatorT& allocator) :
3458  m_Allocator(allocator),
3459  m_pArray(VMA_NULL),
3460  m_Count(0),
3461  m_Capacity(0)
3462  {
3463  }
3464 
3465  VmaVector(size_t count, const AllocatorT& allocator) :
3466  m_Allocator(allocator),
3467  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3468  m_Count(count),
3469  m_Capacity(count)
3470  {
3471  }
3472 
3473  VmaVector(const VmaVector<T, AllocatorT>& src) :
3474  m_Allocator(src.m_Allocator),
3475  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3476  m_Count(src.m_Count),
3477  m_Capacity(src.m_Count)
3478  {
3479  if(m_Count != 0)
3480  {
3481  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3482  }
3483  }
3484 
3485  ~VmaVector()
3486  {
3487  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3488  }
3489 
3490  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3491  {
3492  if(&rhs != this)
3493  {
3494  resize(rhs.m_Count);
3495  if(m_Count != 0)
3496  {
3497  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3498  }
3499  }
3500  return *this;
3501  }
3502 
3503  bool empty() const { return m_Count == 0; }
3504  size_t size() const { return m_Count; }
3505  T* data() { return m_pArray; }
3506  const T* data() const { return m_pArray; }
3507 
3508  T& operator[](size_t index)
3509  {
3510  VMA_HEAVY_ASSERT(index < m_Count);
3511  return m_pArray[index];
3512  }
3513  const T& operator[](size_t index) const
3514  {
3515  VMA_HEAVY_ASSERT(index < m_Count);
3516  return m_pArray[index];
3517  }
3518 
3519  T& front()
3520  {
3521  VMA_HEAVY_ASSERT(m_Count > 0);
3522  return m_pArray[0];
3523  }
3524  const T& front() const
3525  {
3526  VMA_HEAVY_ASSERT(m_Count > 0);
3527  return m_pArray[0];
3528  }
3529  T& back()
3530  {
3531  VMA_HEAVY_ASSERT(m_Count > 0);
3532  return m_pArray[m_Count - 1];
3533  }
3534  const T& back() const
3535  {
3536  VMA_HEAVY_ASSERT(m_Count > 0);
3537  return m_pArray[m_Count - 1];
3538  }
3539 
3540  void reserve(size_t newCapacity, bool freeMemory = false)
3541  {
3542  newCapacity = VMA_MAX(newCapacity, m_Count);
3543 
3544  if((newCapacity < m_Capacity) && !freeMemory)
3545  {
3546  newCapacity = m_Capacity;
3547  }
3548 
3549  if(newCapacity != m_Capacity)
3550  {
3551  // Pass the callbacks, as in resize() below - VmaAllocateArray expects const VkAllocationCallbacks*.
3551  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3552  if(m_Count != 0)
3553  {
3554  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3555  }
3556  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3557  m_Capacity = newCapacity;
3558  m_pArray = newArray;
3559  }
3560  }
3561 
3562  void resize(size_t newCount, bool freeMemory = false)
3563  {
3564  size_t newCapacity = m_Capacity;
3565  if(newCount > m_Capacity)
3566  {
3567  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3568  }
3569  else if(freeMemory)
3570  {
3571  newCapacity = newCount;
3572  }
3573 
3574  if(newCapacity != m_Capacity)
3575  {
3576  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3577  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3578  if(elementsToCopy != 0)
3579  {
3580  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3581  }
3582  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3583  m_Capacity = newCapacity;
3584  m_pArray = newArray;
3585  }
3586 
3587  m_Count = newCount;
3588  }
3589 
3590  void clear(bool freeMemory = false)
3591  {
3592  resize(0, freeMemory);
3593  }
3594 
3595  void insert(size_t index, const T& src)
3596  {
3597  VMA_HEAVY_ASSERT(index <= m_Count);
3598  const size_t oldCount = size();
3599  resize(oldCount + 1);
3600  if(index < oldCount)
3601  {
3602  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3603  }
3604  m_pArray[index] = src;
3605  }
3606 
3607  void remove(size_t index)
3608  {
3609  VMA_HEAVY_ASSERT(index < m_Count);
3610  const size_t oldCount = size();
3611  if(index < oldCount - 1)
3612  {
3613  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3614  }
3615  resize(oldCount - 1);
3616  }
3617 
3618  void push_back(const T& src)
3619  {
3620  const size_t newIndex = size();
3621  resize(newIndex + 1);
3622  m_pArray[newIndex] = src;
3623  }
3624 
3625  void pop_back()
3626  {
3627  VMA_HEAVY_ASSERT(m_Count > 0);
3628  resize(size() - 1);
3629  }
3630 
3631  void push_front(const T& src)
3632  {
3633  insert(0, src);
3634  }
3635 
3636  void pop_front()
3637  {
3638  VMA_HEAVY_ASSERT(m_Count > 0);
3639  remove(0);
3640  }
3641 
3642  typedef T* iterator;
3643 
3644  iterator begin() { return m_pArray; }
3645  iterator end() { return m_pArray + m_Count; }
3646 
3647 private:
3648  AllocatorT m_Allocator;
3649  T* m_pArray;
3650  size_t m_Count;
3651  size_t m_Capacity;
3652 };
3653 
3654 template<typename T, typename allocatorT>
3655 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
3656 {
3657  vec.insert(index, item);
3658 }
3659 
3660 template<typename T, typename allocatorT>
3661 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
3662 {
3663  vec.remove(index);
3664 }
3665 
3666 #endif // #if VMA_USE_STL_VECTOR
3667 
3668 template<typename CmpLess, typename VectorT>
3669 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
3670 {
3671  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3672  vector.data(),
3673  vector.data() + vector.size(),
3674  value,
3675  CmpLess()) - vector.data();
3676  VmaVectorInsert(vector, indexToInsert, value);
3677  return indexToInsert;
3678 }
3679 
3680 template<typename CmpLess, typename VectorT>
3681 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
3682 {
3683  CmpLess comparator;
3684  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
3685  vector.begin(),
3686  vector.end(),
3687  value,
3688  comparator);
3689  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
3690  {
3691  size_t indexToRemove = it - vector.begin();
3692  VmaVectorRemove(vector, indexToRemove);
3693  return true;
3694  }
3695  return false;
3696 }
3697 
3698 template<typename CmpLess, typename IterT, typename KeyT>
3699 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3700 {
3701  CmpLess comparator;
3702  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3703  beg, end, value, comparator);
3704  if(it == end ||
3705  (!comparator(*it, value) && !comparator(value, *it)))
3706  {
3707  return it;
3708  }
3709  return end;
3710 }
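// Usage sketch (assumption): keeping a VmaVector sorted with the helpers above.
#if 0
    struct UintLess { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(VmaStlAllocator<uint32_t>(VMA_NULL));
    VmaVectorInsertSorted<UintLess>(v, 7u);
    VmaVectorInsertSorted<UintLess>(v, 3u);                // v == {3, 7}
    bool removed = VmaVectorRemoveSorted<UintLess>(v, 7u); // true, v == {3}
#endif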
3711 
3713 // class VmaPoolAllocator
3714 
3715 /*
3716 Allocator for objects of type T using a list of arrays (pools) to speed up
3717 allocation. The number of elements that can be allocated is not bounded, because
3718 the allocator can create multiple blocks.
3719 */
3720 template<typename T>
3721 class VmaPoolAllocator
3722 {
3723  VMA_CLASS_NO_COPY(VmaPoolAllocator)
3724 public:
3725  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
3726  ~VmaPoolAllocator();
3727  void Clear();
3728  T* Alloc();
3729  void Free(T* ptr);
3730 
3731 private:
3732  union Item
3733  {
3734  uint32_t NextFreeIndex;
3735  T Value;
3736  };
3737 
3738  struct ItemBlock
3739  {
3740  Item* pItems;
3741  uint32_t FirstFreeIndex;
3742  };
3743 
3744  const VkAllocationCallbacks* m_pAllocationCallbacks;
3745  size_t m_ItemsPerBlock;
3746  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
3747 
3748  ItemBlock& CreateNewBlock();
3749 };
3750 
3751 template<typename T>
3752 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
3753  m_pAllocationCallbacks(pAllocationCallbacks),
3754  m_ItemsPerBlock(itemsPerBlock),
3755  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
3756 {
3757  VMA_ASSERT(itemsPerBlock > 0);
3758 }
3759 
3760 template<typename T>
3761 VmaPoolAllocator<T>::~VmaPoolAllocator()
3762 {
3763  Clear();
3764 }
3765 
3766 template<typename T>
3767 void VmaPoolAllocator<T>::Clear()
3768 {
3769  for(size_t i = m_ItemBlocks.size(); i--; )
3770  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3771  m_ItemBlocks.clear();
3772 }
3773 
3774 template<typename T>
3775 T* VmaPoolAllocator<T>::Alloc()
3776 {
3777  for(size_t i = m_ItemBlocks.size(); i--; )
3778  {
3779  ItemBlock& block = m_ItemBlocks[i];
3780  // This block has some free items: Use first one.
3781  if(block.FirstFreeIndex != UINT32_MAX)
3782  {
3783  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3784  block.FirstFreeIndex = pItem->NextFreeIndex;
3785  return &pItem->Value;
3786  }
3787  }
3788 
3789  // No block has a free item: Create a new one and use it.
3790  ItemBlock& newBlock = CreateNewBlock();
3791  Item* const pItem = &newBlock.pItems[0];
3792  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3793  return &pItem->Value;
3794 }
3795 
3796 template<typename T>
3797 void VmaPoolAllocator<T>::Free(T* ptr)
3798 {
3799  // Search all memory blocks to find ptr.
3800  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3801  {
3802  ItemBlock& block = m_ItemBlocks[i];
3803 
3804  // Casting to union.
3805  Item* pItemPtr;
3806  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3807 
3808  // Check if pItemPtr is in address range of this block.
3809  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3810  {
3811  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3812  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3813  block.FirstFreeIndex = index;
3814  return;
3815  }
3816  }
3817  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3818 }
3819 
3820 template<typename T>
3821 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3822 {
3823  ItemBlock newBlock = {
3824  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3825 
3826  m_ItemBlocks.push_back(newBlock);
3827 
3828  // Setup singly-linked list of all free items in this block.
3829  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3830  newBlock.pItems[i].NextFreeIndex = i + 1;
3831  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3832  return m_ItemBlocks.back();
3833 }
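// Usage sketch (assumption): pool-allocating raw items without heap churn.
#if 0
    VmaPoolAllocator<uint64_t> pool(VMA_NULL, 32); // 32 items per block
    uint64_t* item = pool.Alloc();                 // O(1) from a free list
    pool.Free(item);                               // returned to its block
#endif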
3834 
3836 // class VmaRawList, VmaList
3837 
3838 #if VMA_USE_STL_LIST
3839 
3840 #define VmaList std::list
3841 
3842 #else // #if VMA_USE_STL_LIST
3843 
3844 template<typename T>
3845 struct VmaListItem
3846 {
3847  VmaListItem* pPrev;
3848  VmaListItem* pNext;
3849  T Value;
3850 };
3851 
3852 // Doubly linked list.
3853 template<typename T>
3854 class VmaRawList
3855 {
3856  VMA_CLASS_NO_COPY(VmaRawList)
3857 public:
3858  typedef VmaListItem<T> ItemType;
3859 
3860  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
3861  ~VmaRawList();
3862  void Clear();
3863 
3864  size_t GetCount() const { return m_Count; }
3865  bool IsEmpty() const { return m_Count == 0; }
3866 
3867  ItemType* Front() { return m_pFront; }
3868  const ItemType* Front() const { return m_pFront; }
3869  ItemType* Back() { return m_pBack; }
3870  const ItemType* Back() const { return m_pBack; }
3871 
3872  ItemType* PushBack();
3873  ItemType* PushFront();
3874  ItemType* PushBack(const T& value);
3875  ItemType* PushFront(const T& value);
3876  void PopBack();
3877  void PopFront();
3878 
3879  // Item can be null - it means PushBack.
3880  ItemType* InsertBefore(ItemType* pItem);
3881  // Item can be null - it means PushFront.
3882  ItemType* InsertAfter(ItemType* pItem);
3883 
3884  ItemType* InsertBefore(ItemType* pItem, const T& value);
3885  ItemType* InsertAfter(ItemType* pItem, const T& value);
3886 
3887  void Remove(ItemType* pItem);
3888 
3889 private:
3890  const VkAllocationCallbacks* const m_pAllocationCallbacks;
3891  VmaPoolAllocator<ItemType> m_ItemAllocator;
3892  ItemType* m_pFront;
3893  ItemType* m_pBack;
3894  size_t m_Count;
3895 };
3896 
3897 template<typename T>
3898 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
3899  m_pAllocationCallbacks(pAllocationCallbacks),
3900  m_ItemAllocator(pAllocationCallbacks, 128),
3901  m_pFront(VMA_NULL),
3902  m_pBack(VMA_NULL),
3903  m_Count(0)
3904 {
3905 }
3906 
3907 template<typename T>
3908 VmaRawList<T>::~VmaRawList()
3909 {
3910  // Intentionally not calling Clear, because that would only waste
3911  // computation on returning all items to m_ItemAllocator as free.
3912 }
3913 
3914 template<typename T>
3915 void VmaRawList<T>::Clear()
3916 {
3917  if(IsEmpty() == false)
3918  {
3919  ItemType* pItem = m_pBack;
3920  while(pItem != VMA_NULL)
3921  {
3922  ItemType* const pPrevItem = pItem->pPrev;
3923  m_ItemAllocator.Free(pItem);
3924  pItem = pPrevItem;
3925  }
3926  m_pFront = VMA_NULL;
3927  m_pBack = VMA_NULL;
3928  m_Count = 0;
3929  }
3930 }
3931 
3932 template<typename T>
3933 VmaListItem<T>* VmaRawList<T>::PushBack()
3934 {
3935  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3936  pNewItem->pNext = VMA_NULL;
3937  if(IsEmpty())
3938  {
3939  pNewItem->pPrev = VMA_NULL;
3940  m_pFront = pNewItem;
3941  m_pBack = pNewItem;
3942  m_Count = 1;
3943  }
3944  else
3945  {
3946  pNewItem->pPrev = m_pBack;
3947  m_pBack->pNext = pNewItem;
3948  m_pBack = pNewItem;
3949  ++m_Count;
3950  }
3951  return pNewItem;
3952 }
3953 
3954 template<typename T>
3955 VmaListItem<T>* VmaRawList<T>::PushFront()
3956 {
3957  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3958  pNewItem->pPrev = VMA_NULL;
3959  if(IsEmpty())
3960  {
3961  pNewItem->pNext = VMA_NULL;
3962  m_pFront = pNewItem;
3963  m_pBack = pNewItem;
3964  m_Count = 1;
3965  }
3966  else
3967  {
3968  pNewItem->pNext = m_pFront;
3969  m_pFront->pPrev = pNewItem;
3970  m_pFront = pNewItem;
3971  ++m_Count;
3972  }
3973  return pNewItem;
3974 }
3975 
3976 template<typename T>
3977 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3978 {
3979  ItemType* const pNewItem = PushBack();
3980  pNewItem->Value = value;
3981  return pNewItem;
3982 }
3983 
3984 template<typename T>
3985 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3986 {
3987  ItemType* const pNewItem = PushFront();
3988  pNewItem->Value = value;
3989  return pNewItem;
3990 }
3991 
3992 template<typename T>
3993 void VmaRawList<T>::PopBack()
3994 {
3995  VMA_HEAVY_ASSERT(m_Count > 0);
3996  ItemType* const pBackItem = m_pBack;
3997  ItemType* const pPrevItem = pBackItem->pPrev;
3998  if(pPrevItem != VMA_NULL)
3999  {
4000  pPrevItem->pNext = VMA_NULL;
4001  }
4002  m_pBack = pPrevItem;
4003  m_ItemAllocator.Free(pBackItem);
4004  --m_Count;
4005 }
4006 
4007 template<typename T>
4008 void VmaRawList<T>::PopFront()
4009 {
4010  VMA_HEAVY_ASSERT(m_Count > 0);
4011  ItemType* const pFrontItem = m_pFront;
4012  ItemType* const pNextItem = pFrontItem->pNext;
4013  if(pNextItem != VMA_NULL)
4014  {
4015  pNextItem->pPrev = VMA_NULL;
4016  }
4017  m_pFront = pNextItem;
4018  m_ItemAllocator.Free(pFrontItem);
4019  --m_Count;
4020 }
4021 
4022 template<typename T>
4023 void VmaRawList<T>::Remove(ItemType* pItem)
4024 {
4025  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4026  VMA_HEAVY_ASSERT(m_Count > 0);
4027 
4028  if(pItem->pPrev != VMA_NULL)
4029  {
4030  pItem->pPrev->pNext = pItem->pNext;
4031  }
4032  else
4033  {
4034  VMA_HEAVY_ASSERT(m_pFront == pItem);
4035  m_pFront = pItem->pNext;
4036  }
4037 
4038  if(pItem->pNext != VMA_NULL)
4039  {
4040  pItem->pNext->pPrev = pItem->pPrev;
4041  }
4042  else
4043  {
4044  VMA_HEAVY_ASSERT(m_pBack == pItem);
4045  m_pBack = pItem->pPrev;
4046  }
4047 
4048  m_ItemAllocator.Free(pItem);
4049  --m_Count;
4050 }
4051 
4052 template<typename T>
4053 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4054 {
4055  if(pItem != VMA_NULL)
4056  {
4057  ItemType* const prevItem = pItem->pPrev;
4058  ItemType* const newItem = m_ItemAllocator.Alloc();
4059  newItem->pPrev = prevItem;
4060  newItem->pNext = pItem;
4061  pItem->pPrev = newItem;
4062  if(prevItem != VMA_NULL)
4063  {
4064  prevItem->pNext = newItem;
4065  }
4066  else
4067  {
4068  VMA_HEAVY_ASSERT(m_pFront == pItem);
4069  m_pFront = newItem;
4070  }
4071  ++m_Count;
4072  return newItem;
4073  }
4074  else
4075  return PushBack();
4076 }
4077 
4078 template<typename T>
4079 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4080 {
4081  if(pItem != VMA_NULL)
4082  {
4083  ItemType* const nextItem = pItem->pNext;
4084  ItemType* const newItem = m_ItemAllocator.Alloc();
4085  newItem->pNext = nextItem;
4086  newItem->pPrev = pItem;
4087  pItem->pNext = newItem;
4088  if(nextItem != VMA_NULL)
4089  {
4090  nextItem->pPrev = newItem;
4091  }
4092  else
4093  {
4094  VMA_HEAVY_ASSERT(m_pBack == pItem);
4095  m_pBack = newItem;
4096  }
4097  ++m_Count;
4098  return newItem;
4099  }
4100  else
4101  return PushFront();
4102 }
4103 
4104 template<typename T>
4105 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4106 {
4107  ItemType* const newItem = InsertBefore(pItem);
4108  newItem->Value = value;
4109  return newItem;
4110 }
4111 
4112 template<typename T>
4113 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4114 {
4115  ItemType* const newItem = InsertAfter(pItem);
4116  newItem->Value = value;
4117  return newItem;
4118 }
4119 
4120 template<typename T, typename AllocatorT>
4121 class VmaList
4122 {
4123  VMA_CLASS_NO_COPY(VmaList)
4124 public:
4125  class iterator
4126  {
4127  public:
4128  iterator() :
4129  m_pList(VMA_NULL),
4130  m_pItem(VMA_NULL)
4131  {
4132  }
4133 
4134  T& operator*() const
4135  {
4136  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4137  return m_pItem->Value;
4138  }
4139  T* operator->() const
4140  {
4141  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4142  return &m_pItem->Value;
4143  }
4144 
4145  iterator& operator++()
4146  {
4147  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4148  m_pItem = m_pItem->pNext;
4149  return *this;
4150  }
4151  iterator& operator--()
4152  {
4153  if(m_pItem != VMA_NULL)
4154  {
4155  m_pItem = m_pItem->pPrev;
4156  }
4157  else
4158  {
4159  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4160  m_pItem = m_pList->Back();
4161  }
4162  return *this;
4163  }
4164 
4165  iterator operator++(int)
4166  {
4167  iterator result = *this;
4168  ++*this;
4169  return result;
4170  }
4171  iterator operator--(int)
4172  {
4173  iterator result = *this;
4174  --*this;
4175  return result;
4176  }
4177 
4178  bool operator==(const iterator& rhs) const
4179  {
4180  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4181  return m_pItem == rhs.m_pItem;
4182  }
4183  bool operator!=(const iterator& rhs) const
4184  {
4185  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4186  return m_pItem != rhs.m_pItem;
4187  }
4188 
4189  private:
4190  VmaRawList<T>* m_pList;
4191  VmaListItem<T>* m_pItem;
4192 
4193  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4194  m_pList(pList),
4195  m_pItem(pItem)
4196  {
4197  }
4198 
4199  friend class VmaList<T, AllocatorT>;
4200  };
4201 
4202  class const_iterator
4203  {
4204  public:
4205  const_iterator() :
4206  m_pList(VMA_NULL),
4207  m_pItem(VMA_NULL)
4208  {
4209  }
4210 
4211  const_iterator(const iterator& src) :
4212  m_pList(src.m_pList),
4213  m_pItem(src.m_pItem)
4214  {
4215  }
4216 
4217  const T& operator*() const
4218  {
4219  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4220  return m_pItem->Value;
4221  }
4222  const T* operator->() const
4223  {
4224  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4225  return &m_pItem->Value;
4226  }
4227 
4228  const_iterator& operator++()
4229  {
4230  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4231  m_pItem = m_pItem->pNext;
4232  return *this;
4233  }
4234  const_iterator& operator--()
4235  {
4236  if(m_pItem != VMA_NULL)
4237  {
4238  m_pItem = m_pItem->pPrev;
4239  }
4240  else
4241  {
4242  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4243  m_pItem = m_pList->Back();
4244  }
4245  return *this;
4246  }
4247 
4248  const_iterator operator++(int)
4249  {
4250  const_iterator result = *this;
4251  ++*this;
4252  return result;
4253  }
4254  const_iterator operator--(int)
4255  {
4256  const_iterator result = *this;
4257  --*this;
4258  return result;
4259  }
4260 
4261  bool operator==(const const_iterator& rhs) const
4262  {
4263  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4264  return m_pItem == rhs.m_pItem;
4265  }
4266  bool operator!=(const const_iterator& rhs) const
4267  {
4268  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4269  return m_pItem != rhs.m_pItem;
4270  }
4271 
4272  private:
4273  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4274  m_pList(pList),
4275  m_pItem(pItem)
4276  {
4277  }
4278 
4279  const VmaRawList<T>* m_pList;
4280  const VmaListItem<T>* m_pItem;
4281 
4282  friend class VmaList<T, AllocatorT>;
4283  };
4284 
4285  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4286 
4287  bool empty() const { return m_RawList.IsEmpty(); }
4288  size_t size() const { return m_RawList.GetCount(); }
4289 
4290  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4291  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4292 
4293  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4294  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4295 
4296  void clear() { m_RawList.Clear(); }
4297  void push_back(const T& value) { m_RawList.PushBack(value); }
4298  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4299  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4300 
4301 private:
4302  VmaRawList<T> m_RawList;
4303 };
4304 
4305 #endif // #if VMA_USE_STL_LIST
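// Usage sketch (assumption): VmaList mirrors the std::list subset used here.
#if 0
    typedef VmaList< uint32_t, VmaStlAllocator<uint32_t> > MyList;
    MyList list(VmaStlAllocator<uint32_t>(VMA_NULL));
    list.push_back(1u);
    list.push_back(2u);
    for(MyList::iterator it = list.begin(); it != list.end(); ++it)
        ++*it; // elements become 2, 3
#endif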
4306 
4308 // class VmaMap
4309 
4310 // Unused in this version.
4311 #if 0
4312 
4313 #if VMA_USE_STL_UNORDERED_MAP
4314 
4315 #define VmaPair std::pair
4316 
4317 #define VMA_MAP_TYPE(KeyT, ValueT) \
4318  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4319 
4320 #else // #if VMA_USE_STL_UNORDERED_MAP
4321 
4322 template<typename T1, typename T2>
4323 struct VmaPair
4324 {
4325  T1 first;
4326  T2 second;
4327 
4328  VmaPair() : first(), second() { }
4329  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4330 };
4331 
4332 /* Class compatible with subset of interface of std::unordered_map.
4333 KeyT, ValueT must be POD because they will be stored in VmaVector.
4334 */
4335 template<typename KeyT, typename ValueT>
4336 class VmaMap
4337 {
4338 public:
4339  typedef VmaPair<KeyT, ValueT> PairType;
4340  typedef PairType* iterator;
4341 
4342  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4343 
4344  iterator begin() { return m_Vector.begin(); }
4345  iterator end() { return m_Vector.end(); }
4346 
4347  void insert(const PairType& pair);
4348  iterator find(const KeyT& key);
4349  void erase(iterator it);
4350 
4351 private:
4352  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4353 };
4354 
4355 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4356 
4357 template<typename FirstT, typename SecondT>
4358 struct VmaPairFirstLess
4359 {
4360  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4361  {
4362  return lhs.first < rhs.first;
4363  }
4364  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4365  {
4366  return lhs.first < rhsFirst;
4367  }
4368 };
4369 
4370 template<typename KeyT, typename ValueT>
4371 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4372 {
4373  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4374  m_Vector.data(),
4375  m_Vector.data() + m_Vector.size(),
4376  pair,
4377  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4378  VmaVectorInsert(m_Vector, indexToInsert, pair);
4379 }
4380 
4381 template<typename KeyT, typename ValueT>
4382 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4383 {
4384  PairType* it = VmaBinaryFindFirstNotLess(
4385  m_Vector.data(),
4386  m_Vector.data() + m_Vector.size(),
4387  key,
4388  VmaPairFirstLess<KeyT, ValueT>());
4389  if((it != m_Vector.end()) && (it->first == key))
4390  {
4391  return it;
4392  }
4393  else
4394  {
4395  return m_Vector.end();
4396  }
4397 }
4398 
4399 template<typename KeyT, typename ValueT>
4400 void VmaMap<KeyT, ValueT>::erase(iterator it)
4401 {
4402  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4403 }
4404 
4405 #endif // #if VMA_USE_STL_UNORDERED_MAP
4406 
4407 #endif // #if 0
4408 
4410 
4411 class VmaDeviceMemoryBlock;
4412 
4413 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4414 
4415 struct VmaAllocation_T
4416 {
4417  VMA_CLASS_NO_COPY(VmaAllocation_T)
4418 private:
4419  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4420 
4421  enum FLAGS
4422  {
4423  FLAG_USER_DATA_STRING = 0x01,
4424  };
4425 
4426 public:
4427  enum ALLOCATION_TYPE
4428  {
4429  ALLOCATION_TYPE_NONE,
4430  ALLOCATION_TYPE_BLOCK,
4431  ALLOCATION_TYPE_DEDICATED,
4432  };
4433 
4434  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4435  m_Alignment(1),
4436  m_Size(0),
4437  m_pUserData(VMA_NULL),
4438  m_LastUseFrameIndex(currentFrameIndex),
4439  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4440  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4441  m_MapCount(0),
4442  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4443  {
4444 #if VMA_STATS_STRING_ENABLED
4445  m_CreationFrameIndex = currentFrameIndex;
4446  m_BufferImageUsage = 0;
4447 #endif
4448  }
4449 
4450  ~VmaAllocation_T()
4451  {
4452  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4453 
4454  // Check if owned string was freed.
4455  VMA_ASSERT(m_pUserData == VMA_NULL);
4456  }
4457 
4458  void InitBlockAllocation(
4459  VmaPool hPool,
4460  VmaDeviceMemoryBlock* block,
4461  VkDeviceSize offset,
4462  VkDeviceSize alignment,
4463  VkDeviceSize size,
4464  VmaSuballocationType suballocationType,
4465  bool mapped,
4466  bool canBecomeLost)
4467  {
4468  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4469  VMA_ASSERT(block != VMA_NULL);
4470  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4471  m_Alignment = alignment;
4472  m_Size = size;
4473  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4474  m_SuballocationType = (uint8_t)suballocationType;
4475  m_BlockAllocation.m_hPool = hPool;
4476  m_BlockAllocation.m_Block = block;
4477  m_BlockAllocation.m_Offset = offset;
4478  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4479  }
4480 
4481  void InitLost()
4482  {
4483  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4484  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4485  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4486  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4487  m_BlockAllocation.m_Block = VMA_NULL;
4488  m_BlockAllocation.m_Offset = 0;
4489  m_BlockAllocation.m_CanBecomeLost = true;
4490  }
4491 
4492  void ChangeBlockAllocation(
4493  VmaAllocator hAllocator,
4494  VmaDeviceMemoryBlock* block,
4495  VkDeviceSize offset);
4496 
4497  // pMappedData not null means allocation is created with MAPPED flag.
4498  void InitDedicatedAllocation(
4499  uint32_t memoryTypeIndex,
4500  VkDeviceMemory hMemory,
4501  VmaSuballocationType suballocationType,
4502  void* pMappedData,
4503  VkDeviceSize size)
4504  {
4505  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4506  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4507  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4508  m_Alignment = 0;
4509  m_Size = size;
4510  m_SuballocationType = (uint8_t)suballocationType;
4511  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4512  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4513  m_DedicatedAllocation.m_hMemory = hMemory;
4514  m_DedicatedAllocation.m_pMappedData = pMappedData;
4515  }
4516 
4517  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4518  VkDeviceSize GetAlignment() const { return m_Alignment; }
4519  VkDeviceSize GetSize() const { return m_Size; }
4520  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4521  void* GetUserData() const { return m_pUserData; }
4522  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4523  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4524 
4525  VmaDeviceMemoryBlock* GetBlock() const
4526  {
4527  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4528  return m_BlockAllocation.m_Block;
4529  }
4530  VkDeviceSize GetOffset() const;
4531  VkDeviceMemory GetMemory() const;
4532  uint32_t GetMemoryTypeIndex() const;
4533  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4534  void* GetMappedData() const;
4535  bool CanBecomeLost() const;
4536  VmaPool GetPool() const;
4537 
4538  uint32_t GetLastUseFrameIndex() const
4539  {
4540  return m_LastUseFrameIndex.load();
4541  }
4542  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4543  {
4544  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4545  }
4546  /*
4547  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4548  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4549  - Else, returns false.
4550 
4551  If hAllocation is already lost, assert - you should not call it then.
4552  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
4553  */
4554  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4555 
4556  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4557  {
4558  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4559  outInfo.blockCount = 1;
4560  outInfo.allocationCount = 1;
4561  outInfo.unusedRangeCount = 0;
4562  outInfo.usedBytes = m_Size;
4563  outInfo.unusedBytes = 0;
4564  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4565  outInfo.unusedRangeSizeMin = UINT64_MAX;
4566  outInfo.unusedRangeSizeMax = 0;
4567  }
4568 
4569  void BlockAllocMap();
4570  void BlockAllocUnmap();
4571  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4572  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4573 
4574 #if VMA_STATS_STRING_ENABLED
4575  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4576  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4577 
4578  void InitBufferImageUsage(uint32_t bufferImageUsage)
4579  {
4580  VMA_ASSERT(m_BufferImageUsage == 0);
4581  m_BufferImageUsage = bufferImageUsage;
4582  }
4583 
4584  void PrintParameters(class VmaJsonWriter& json) const;
4585 #endif
4586 
4587 private:
4588  VkDeviceSize m_Alignment;
4589  VkDeviceSize m_Size;
4590  void* m_pUserData;
4591  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4592  uint8_t m_Type; // ALLOCATION_TYPE
4593  uint8_t m_SuballocationType; // VmaSuballocationType
4594  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4595  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
4596  uint8_t m_MapCount;
4597  uint8_t m_Flags; // enum FLAGS
4598 
4599  // Allocation out of VmaDeviceMemoryBlock.
4600  struct BlockAllocation
4601  {
4602  VmaPool m_hPool; // Null if belongs to general memory.
4603  VmaDeviceMemoryBlock* m_Block;
4604  VkDeviceSize m_Offset;
4605  bool m_CanBecomeLost;
4606  };
4607 
4608  // Allocation for an object that has its own private VkDeviceMemory.
4609  struct DedicatedAllocation
4610  {
4611  uint32_t m_MemoryTypeIndex;
4612  VkDeviceMemory m_hMemory;
4613  void* m_pMappedData; // Not null means memory is mapped.
4614  };
4615 
4616  union
4617  {
4618  // Allocation out of VmaDeviceMemoryBlock.
4619  BlockAllocation m_BlockAllocation;
4620  // Allocation for an object that has its own private VkDeviceMemory.
4621  DedicatedAllocation m_DedicatedAllocation;
4622  };
4623 
4624 #if VMA_STATS_STRING_ENABLED
4625  uint32_t m_CreationFrameIndex;
4626  uint32_t m_BufferImageUsage; // 0 if unknown.
4627 #endif
4628 
4629  void FreeUserDataString(VmaAllocator hAllocator);
4630 };
4631 
4632 /*
4633 Represents a region of a VmaDeviceMemoryBlock that is either free, or assigned
4634 and returned as an allocated memory block.
4635 */
4636 struct VmaSuballocation
4637 {
4638  VkDeviceSize offset;
4639  VkDeviceSize size;
4640  VmaAllocation hAllocation;
4641  VmaSuballocationType type;
4642 };
4643 
4644 // Comparator for offsets.
4645 struct VmaSuballocationOffsetLess
4646 {
4647  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4648  {
4649  return lhs.offset < rhs.offset;
4650  }
4651 };
4652 struct VmaSuballocationOffsetGreater
4653 {
4654  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4655  {
4656  return lhs.offset > rhs.offset;
4657  }
4658 };
4659 
4660 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4661 
4662 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
4663 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4664 
4665 /*
4666 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4667 
4668 If canMakeOtherLost was false:
4669 - item points to a FREE suballocation.
4670 - itemsToMakeLostCount is 0.
4671 
4672 If canMakeOtherLost was true:
4673 - item points to first of sequence of suballocations, which are either FREE,
4674  or point to VmaAllocations that can become lost.
4675 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4676  the requested allocation to succeed.
4677 */
4678 struct VmaAllocationRequest
4679 {
4680  VkDeviceSize offset;
4681  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4682  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4683  VmaSuballocationList::iterator item;
4684  size_t itemsToMakeLostCount;
4685  void* customData;
4686 
4687  VkDeviceSize CalcCost() const
4688  {
4689  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4690  }
4691 };
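// Worked example: with sumItemSize = 1 MiB and itemsToMakeLostCount = 2,
// CalcCost() returns 1 MiB + 2 * VMA_LOST_ALLOCATION_COST = 3 MiB, i.e.
// making two allocations lost is costed like wasting 2 MiB of free space.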
4692 
4693 /*
4694 Data structure used for bookkeeping of allocations and unused ranges of memory
4695 in a single VkDeviceMemory block.
4696 */
4697 class VmaBlockMetadata
4698 {
4699 public:
4700  VmaBlockMetadata(VmaAllocator hAllocator);
4701  virtual ~VmaBlockMetadata() { }
4702  virtual void Init(VkDeviceSize size) { m_Size = size; }
4703 
4704  // Validates all data structures inside this object. If not valid, returns false.
4705  virtual bool Validate() const = 0;
4706  VkDeviceSize GetSize() const { return m_Size; }
4707  virtual size_t GetAllocationCount() const = 0;
4708  virtual VkDeviceSize GetSumFreeSize() const = 0;
4709  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
4710  // Returns true if this block is empty - contains only a single free suballocation.
4711  virtual bool IsEmpty() const = 0;
4712 
4713  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
4714  // Shouldn't modify blockCount.
4715  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
4716 
4717 #if VMA_STATS_STRING_ENABLED
4718  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
4719 #endif
4720 
4721  // Tries to find a place for suballocation with given parameters inside this block.
4722  // If succeeded, fills pAllocationRequest and returns true.
4723  // If failed, returns false.
4724  virtual bool CreateAllocationRequest(
4725  uint32_t currentFrameIndex,
4726  uint32_t frameInUseCount,
4727  VkDeviceSize bufferImageGranularity,
4728  VkDeviceSize allocSize,
4729  VkDeviceSize allocAlignment,
4730  bool upperAddress,
4731  VmaSuballocationType allocType,
4732  bool canMakeOtherLost,
4733  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
4734  VmaAllocationRequest* pAllocationRequest) = 0;
4735 
4736  virtual bool MakeRequestedAllocationsLost(
4737  uint32_t currentFrameIndex,
4738  uint32_t frameInUseCount,
4739  VmaAllocationRequest* pAllocationRequest) = 0;
4740 
4741  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
4742 
4743  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
4744 
4745  // Makes actual allocation based on request. Request must already be checked and valid.
4746  virtual void Alloc(
4747  const VmaAllocationRequest& request,
4748  VmaSuballocationType type,
4749  VkDeviceSize allocSize,
4750  bool upperAddress,
4751  VmaAllocation hAllocation) = 0;
4752 
4753  // Frees suballocation assigned to given memory region.
4754  virtual void Free(const VmaAllocation allocation) = 0;
4755  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
4756 
4757 protected:
4758  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
4759 
4760 #if VMA_STATS_STRING_ENABLED
4761  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
4762  VkDeviceSize unusedBytes,
4763  size_t allocationCount,
4764  size_t unusedRangeCount) const;
4765  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
4766  VkDeviceSize offset,
4767  VmaAllocation hAllocation) const;
4768  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
4769  VkDeviceSize offset,
4770  VkDeviceSize size) const;
4771  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
4772 #endif
4773 
4774 private:
4775  VkDeviceSize m_Size;
4776  const VkAllocationCallbacks* m_pAllocationCallbacks;
4777 };
4778 
4779 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
4780  VMA_ASSERT(0 && "Validation failed: " #cond); \
4781  return false; \
4782  } } while(false)
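/*
A hedged usage sketch for VMA_VALIDATE (hypothetical Validate() body): each failed
condition asserts with the stringized expression and makes the enclosing function
return false, so checks chain without nested ifs.

bool SomeMetadata::Validate() const
{
    VMA_VALIDATE(GetSize() > 0);
    VMA_VALIDATE(GetSumFreeSize() <= GetSize());
    return true; // All checks passed.
}
*/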
4783 
4784 class VmaBlockMetadata_Generic : public VmaBlockMetadata
4785 {
4786  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
4787 public:
4788  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
4789  virtual ~VmaBlockMetadata_Generic();
4790  virtual void Init(VkDeviceSize size);
4791 
4792  virtual bool Validate() const;
4793  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4794  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4795  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4796  virtual bool IsEmpty() const;
4797 
4798  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4799  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4800 
4801 #if VMA_STATS_STRING_ENABLED
4802  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4803 #endif
4804 
4805  virtual bool CreateAllocationRequest(
4806  uint32_t currentFrameIndex,
4807  uint32_t frameInUseCount,
4808  VkDeviceSize bufferImageGranularity,
4809  VkDeviceSize allocSize,
4810  VkDeviceSize allocAlignment,
4811  bool upperAddress,
4812  VmaSuballocationType allocType,
4813  bool canMakeOtherLost,
4814  uint32_t strategy,
4815  VmaAllocationRequest* pAllocationRequest);
4816 
4817  virtual bool MakeRequestedAllocationsLost(
4818  uint32_t currentFrameIndex,
4819  uint32_t frameInUseCount,
4820  VmaAllocationRequest* pAllocationRequest);
4821 
4822  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4823 
4824  virtual VkResult CheckCorruption(const void* pBlockData);
4825 
4826  virtual void Alloc(
4827  const VmaAllocationRequest& request,
4828  VmaSuballocationType type,
4829  VkDeviceSize allocSize,
4830  bool upperAddress,
4831  VmaAllocation hAllocation);
4832 
4833  virtual void Free(const VmaAllocation allocation);
4834  virtual void FreeAtOffset(VkDeviceSize offset);
4835 
4836 private:
4837  uint32_t m_FreeCount;
4838  VkDeviceSize m_SumFreeSize;
4839  VmaSuballocationList m_Suballocations;
4840  // Suballocations that are free and have size greater than a certain threshold.
4841  // Sorted by size, ascending.
4842  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4843 
4844  bool ValidateFreeSuballocationList() const;
4845 
4846  // Checks if the requested suballocation with given parameters can be placed in given suballocItem.
4847  // If yes, fills pOffset and returns true. If no, returns false.
4848  bool CheckAllocation(
4849  uint32_t currentFrameIndex,
4850  uint32_t frameInUseCount,
4851  VkDeviceSize bufferImageGranularity,
4852  VkDeviceSize allocSize,
4853  VkDeviceSize allocAlignment,
4854  VmaSuballocationType allocType,
4855  VmaSuballocationList::const_iterator suballocItem,
4856  bool canMakeOtherLost,
4857  VkDeviceSize* pOffset,
4858  size_t* itemsToMakeLostCount,
4859  VkDeviceSize* pSumFreeSize,
4860  VkDeviceSize* pSumItemSize) const;
4861  // Given a free suballocation, merges it with the following one, which must also be free.
4862  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4863  // Releases the given suballocation, making it free.
4864  // Merges it with adjacent free suballocations if applicable.
4865  // Returns an iterator to the new free suballocation at this place.
4866  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4867  // Given a free suballocation, inserts it into the sorted list of
4868  // m_FreeSuballocationsBySize if it's suitable.
4869  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4870  // Given a free suballocation, removes it from the sorted list of
4871  // m_FreeSuballocationsBySize if it's suitable.
4872  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4873 };
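/*
A hedged sketch (illustration only, not the actual implementation) of the
invariant behind m_FreeSuballocationsBySize: only free ranges at least as large
as the registration threshold are tracked, inserted at their search position so
the vector stays sorted by size, ascending.

void RegisterFreeSuballocation_Sketch(
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator> >& bySize,
    VmaSuballocationList::iterator item)
{
    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Find the first registered range whose size is >= item->size.
        size_t index = 0;
        while(index < bySize.size() && bySize[index]->size < item->size)
            ++index;
        bySize.insert(index, item); // Keeps ascending order by size.
    }
}
*/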
4874 
4875 /*
4876 Allocations and their references in internal data structure look like this:
4877 
4878 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4879 
4880  0 +-------+
4881  | |
4882  | |
4883  | |
4884  +-------+
4885  | Alloc | 1st[m_1stNullItemsBeginCount]
4886  +-------+
4887  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4888  +-------+
4889  | ... |
4890  +-------+
4891  | Alloc | 1st[1st.size() - 1]
4892  +-------+
4893  | |
4894  | |
4895  | |
4896 GetSize() +-------+
4897 
4898 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4899 
4900  0 +-------+
4901  | Alloc | 2nd[0]
4902  +-------+
4903  | Alloc | 2nd[1]
4904  +-------+
4905  | ... |
4906  +-------+
4907  | Alloc | 2nd[2nd.size() - 1]
4908  +-------+
4909  | |
4910  | |
4911  | |
4912  +-------+
4913  | Alloc | 1st[m_1stNullItemsBeginCount]
4914  +-------+
4915  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4916  +-------+
4917  | ... |
4918  +-------+
4919  | Alloc | 1st[1st.size() - 1]
4920  +-------+
4921  | |
4922 GetSize() +-------+
4923 
4924 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4925 
4926  0 +-------+
4927  | |
4928  | |
4929  | |
4930  +-------+
4931  | Alloc | 1st[m_1stNullItemsBeginCount]
4932  +-------+
4933  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4934  +-------+
4935  | ... |
4936  +-------+
4937  | Alloc | 1st[1st.size() - 1]
4938  +-------+
4939  | |
4940  | |
4941  | |
4942  +-------+
4943  | Alloc | 2nd[2nd.size() - 1]
4944  +-------+
4945  | ... |
4946  +-------+
4947  | Alloc | 2nd[1]
4948  +-------+
4949  | Alloc | 2nd[0]
4950 GetSize() +-------+
4951 
4952 */
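/*
A hedged summary of the three modes pictured above, as a sketch of which vector a
new suballocation would be appended to. This is a hypothetical, simplified helper -
the real logic also depends on whether the request actually fits:

SuballocationVectorType& VmaBlockMetadata_Linear::PickTargetVector_Sketch(bool upperAddress)
{
    if(upperAddress || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        return AccessSuballocations2nd(); // Double-stack top or ring-buffer tail: lower offsets.
    return AccessSuballocations1st();     // Plain linear growth toward GetSize().
}
*/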
4953 class VmaBlockMetadata_Linear : public VmaBlockMetadata
4954 {
4955  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
4956 public:
4957  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
4958  virtual ~VmaBlockMetadata_Linear();
4959  virtual void Init(VkDeviceSize size);
4960 
4961  virtual bool Validate() const;
4962  virtual size_t GetAllocationCount() const;
4963  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4964  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4965  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
4966 
4967  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4968  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4969 
4970 #if VMA_STATS_STRING_ENABLED
4971  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4972 #endif
4973 
4974  virtual bool CreateAllocationRequest(
4975  uint32_t currentFrameIndex,
4976  uint32_t frameInUseCount,
4977  VkDeviceSize bufferImageGranularity,
4978  VkDeviceSize allocSize,
4979  VkDeviceSize allocAlignment,
4980  bool upperAddress,
4981  VmaSuballocationType allocType,
4982  bool canMakeOtherLost,
4983  uint32_t strategy,
4984  VmaAllocationRequest* pAllocationRequest);
4985 
4986  virtual bool MakeRequestedAllocationsLost(
4987  uint32_t currentFrameIndex,
4988  uint32_t frameInUseCount,
4989  VmaAllocationRequest* pAllocationRequest);
4990 
4991  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4992 
4993  virtual VkResult CheckCorruption(const void* pBlockData);
4994 
4995  virtual void Alloc(
4996  const VmaAllocationRequest& request,
4997  VmaSuballocationType type,
4998  VkDeviceSize allocSize,
4999  bool upperAddress,
5000  VmaAllocation hAllocation);
5001 
5002  virtual void Free(const VmaAllocation allocation);
5003  virtual void FreeAtOffset(VkDeviceSize offset);
5004 
5005 private:
5006  /*
5007  There are two suballocation vectors, used in ping-pong way.
5008  The one with index m_1stVectorIndex is called 1st.
5009  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5010  2nd can be non-empty only when 1st is not empty.
5011  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5012  */
5013  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5014 
5015  enum SECOND_VECTOR_MODE
5016  {
5017  SECOND_VECTOR_EMPTY,
5018  /*
5019  Suballocations in 2nd vector are created later than the ones in 1st, but they
5020  all have smaller offsets.
5021  */
5022  SECOND_VECTOR_RING_BUFFER,
5023  /*
5024  Suballocations in 2nd vector are upper side of double stack.
5025  They all have offsets higher than those in 1st vector.
5026  Top of this stack means smaller offsets, but higher indices in this vector.
5027  */
5028  SECOND_VECTOR_DOUBLE_STACK,
5029  };
5030 
5031  VkDeviceSize m_SumFreeSize;
5032  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5033  uint32_t m_1stVectorIndex;
5034  SECOND_VECTOR_MODE m_2ndVectorMode;
5035 
5036  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5037  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5038  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5039  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5040 
5041  // Number of items in 1st vector with hAllocation = null at the beginning.
5042  size_t m_1stNullItemsBeginCount;
5043  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5044  size_t m_1stNullItemsMiddleCount;
5045  // Number of items in 2nd vector with hAllocation = null.
5046  size_t m_2ndNullItemsCount;
5047 
5048  bool ShouldCompact1st() const;
5049  void CleanupAfterFree();
5050 };
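/*
The 1st/2nd roles above are swapped by flipping m_1stVectorIndex rather than by
copying elements. A minimal standalone illustration of the same ping-pong pattern
(hypothetical, using std::vector for brevity):

#include <vector>

struct PingPong
{
    std::vector<int> buffers[2];
    unsigned firstIndex = 0;

    std::vector<int>& First()  { return buffers[firstIndex]; }
    std::vector<int>& Second() { return buffers[firstIndex ^ 1]; }
    void SwapRoles() { firstIndex ^= 1; } // The former 2nd becomes the new 1st.
};
*/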
5051 
5052 /*
5053 - GetSize() is the original size of allocated memory block.
5054 - m_UsableSize is this size aligned down to a power of two.
5055  All allocations and calculations happen relative to m_UsableSize.
5056 - GetUnusableSize() is the difference between them.
5057  It is reported as a separate, unused range, not available for allocations.
5058 
5059 Node at level 0 has size = m_UsableSize.
5060 Each next level contains nodes with size 2 times smaller than current level.
5061 m_LevelCount is the maximum number of levels to use in the current object.
5062 */
5063 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5064 {
5065  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5066 public:
5067  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5068  virtual ~VmaBlockMetadata_Buddy();
5069  virtual void Init(VkDeviceSize size);
5070 
5071  virtual bool Validate() const;
5072  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5073  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5074  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5075  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5076 
5077  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5078  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5079 
5080 #if VMA_STATS_STRING_ENABLED
5081  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5082 #endif
5083 
5084  virtual bool CreateAllocationRequest(
5085  uint32_t currentFrameIndex,
5086  uint32_t frameInUseCount,
5087  VkDeviceSize bufferImageGranularity,
5088  VkDeviceSize allocSize,
5089  VkDeviceSize allocAlignment,
5090  bool upperAddress,
5091  VmaSuballocationType allocType,
5092  bool canMakeOtherLost,
5093  uint32_t strategy,
5094  VmaAllocationRequest* pAllocationRequest);
5095 
5096  virtual bool MakeRequestedAllocationsLost(
5097  uint32_t currentFrameIndex,
5098  uint32_t frameInUseCount,
5099  VmaAllocationRequest* pAllocationRequest);
5100 
5101  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5102 
5103  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5104 
5105  virtual void Alloc(
5106  const VmaAllocationRequest& request,
5107  VmaSuballocationType type,
5108  VkDeviceSize allocSize,
5109  bool upperAddress,
5110  VmaAllocation hAllocation);
5111 
5112  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5113  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5114 
5115 private:
5116  static const VkDeviceSize MIN_NODE_SIZE = 32;
5117  static const size_t MAX_LEVELS = 30;
5118 
5119  struct ValidationContext
5120  {
5121  size_t calculatedAllocationCount;
5122  size_t calculatedFreeCount;
5123  VkDeviceSize calculatedSumFreeSize;
5124 
5125  ValidationContext() :
5126  calculatedAllocationCount(0),
5127  calculatedFreeCount(0),
5128  calculatedSumFreeSize(0) { }
5129  };
5130 
5131  struct Node
5132  {
5133  VkDeviceSize offset;
5134  enum TYPE
5135  {
5136  TYPE_FREE,
5137  TYPE_ALLOCATION,
5138  TYPE_SPLIT,
5139  TYPE_COUNT
5140  } type;
5141  Node* parent;
5142  Node* buddy;
5143 
5144  union
5145  {
5146  struct
5147  {
5148  Node* prev;
5149  Node* next;
5150  } free;
5151  struct
5152  {
5153  VmaAllocation alloc;
5154  } allocation;
5155  struct
5156  {
5157  Node* leftChild;
5158  } split;
5159  };
5160  };
5161 
5162  // Size of the memory block aligned down to a power of two.
5163  VkDeviceSize m_UsableSize;
5164  uint32_t m_LevelCount;
5165 
5166  Node* m_Root;
5167  struct {
5168  Node* front;
5169  Node* back;
5170  } m_FreeList[MAX_LEVELS];
5171  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5172  size_t m_AllocationCount;
5173  // Number of nodes in the tree with type == TYPE_FREE.
5174  size_t m_FreeCount;
5175  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5176  VkDeviceSize m_SumFreeSize;
5177 
5178  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5179  void DeleteNode(Node* node);
5180  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5181  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5182  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5183  // Alloc passed just for validation. Can be null.
5184  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5185  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5186  // Adds node to the front of FreeList at given level.
5187  // node->type must be FREE.
5188  // node->free.prev, next can be undefined.
5189  void AddToFreeListFront(uint32_t level, Node* node);
5190  // Removes node from FreeList at given level.
5191  // node->type must be FREE.
5192  // node->free.prev, next stay untouched.
5193  void RemoveFromFreeList(uint32_t level, Node* node);
5194 
5195 #if VMA_STATS_STRING_ENABLED
5196  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5197 #endif
5198 };
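/*
A worked example of the level arithmetic above, assuming m_UsableSize = 1024 and
m_LevelCount = 4: LevelToNodeSize(0) = 1024, (1) = 512, (2) = 256, (3) = 128.
A standalone sketch of the matching size-to-level mapping (illustration only):

uint32_t AllocSizeToLevel_Sketch(VkDeviceSize usableSize, uint32_t levelCount, VkDeviceSize allocSize)
{
    uint32_t level = 0;
    VkDeviceSize nodeSize = usableSize;
    // Descend while a child node (half the size) would still fit the allocation.
    while(level + 1 < levelCount && (nodeSize >> 1) >= allocSize)
    {
        nodeSize >>= 1;
        ++level;
    }
    return level; // E.g. allocSize = 200 -> level 2 (node size 256).
}
*/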
5199 
5200 /*
5201 Represents a single block of device memory (`VkDeviceMemory`) with all the
5202 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5203 
5204 Thread-safety: This class must be externally synchronized.
5205 */
5206 class VmaDeviceMemoryBlock
5207 {
5208  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5209 public:
5210  VmaBlockMetadata* m_pMetadata;
5211 
5212  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5213 
5214  ~VmaDeviceMemoryBlock()
5215  {
5216  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5217  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5218  }
5219 
5220  // Always call after construction.
5221  void Init(
5222  VmaAllocator hAllocator,
5223  uint32_t newMemoryTypeIndex,
5224  VkDeviceMemory newMemory,
5225  VkDeviceSize newSize,
5226  uint32_t id,
5227  uint32_t algorithm);
5228  // Always call before destruction.
5229  void Destroy(VmaAllocator allocator);
5230 
5231  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5232  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5233  uint32_t GetId() const { return m_Id; }
5234  void* GetMappedData() const { return m_pMappedData; }
5235 
5236  // Validates all data structures inside this object. If not valid, returns false.
5237  bool Validate() const;
5238 
5239  VkResult CheckCorruption(VmaAllocator hAllocator);
5240 
5241  // ppData can be null.
5242  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5243  void Unmap(VmaAllocator hAllocator, uint32_t count);
5244 
5245  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5246  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5247 
5248  VkResult BindBufferMemory(
5249  const VmaAllocator hAllocator,
5250  const VmaAllocation hAllocation,
5251  VkBuffer hBuffer);
5252  VkResult BindImageMemory(
5253  const VmaAllocator hAllocator,
5254  const VmaAllocation hAllocation,
5255  VkImage hImage);
5256 
5257 private:
5258  uint32_t m_MemoryTypeIndex;
5259  uint32_t m_Id;
5260  VkDeviceMemory m_hMemory;
5261 
5262  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5263  // Also protects m_MapCount, m_pMappedData.
5264  VMA_MUTEX m_Mutex;
5265  uint32_t m_MapCount;
5266  void* m_pMappedData;
5267 };
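/*
A hedged usage sketch of the reference-counted mapping above (error handling
omitted): nested Map() calls on the same block share a single vkMapMemory, and
vkUnmapMemory runs only when the count drops back to zero.

void MapTwice_Sketch(VmaAllocator allocator, VmaDeviceMemoryBlock& block)
{
    void* pData = VMA_NULL;
    block.Map(allocator, 1, &pData);  // Count 0 -> 1: vkMapMemory is called.
    block.Map(allocator, 1, &pData);  // Count 1 -> 2: reuses the existing mapping.
    block.Unmap(allocator, 1);        // Count 2 -> 1: still mapped.
    block.Unmap(allocator, 1);        // Count 1 -> 0: vkUnmapMemory is called.
}
*/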
5268 
5269 struct VmaPointerLess
5270 {
5271  bool operator()(const void* lhs, const void* rhs) const
5272  {
5273  return lhs < rhs;
5274  }
5275 };
5276 
5277 class VmaDefragmentator;
5278 
5279 /*
5280 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5281 Vulkan memory type.
5282 
5283 Synchronized internally with a mutex.
5284 */
5285 struct VmaBlockVector
5286 {
5287  VMA_CLASS_NO_COPY(VmaBlockVector)
5288 public:
5289  VmaBlockVector(
5290  VmaAllocator hAllocator,
5291  uint32_t memoryTypeIndex,
5292  VkDeviceSize preferredBlockSize,
5293  size_t minBlockCount,
5294  size_t maxBlockCount,
5295  VkDeviceSize bufferImageGranularity,
5296  uint32_t frameInUseCount,
5297  bool isCustomPool,
5298  bool explicitBlockSize,
5299  uint32_t algorithm);
5300  ~VmaBlockVector();
5301 
5302  VkResult CreateMinBlocks();
5303 
5304  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5305  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5306  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5307  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5308  uint32_t GetAlgorithm() const { return m_Algorithm; }
5309 
5310  void GetPoolStats(VmaPoolStats* pStats);
5311 
5312  bool IsEmpty() const { return m_Blocks.empty(); }
5313  bool IsCorruptionDetectionEnabled() const;
5314 
5315  VkResult Allocate(
5316  VmaPool hCurrentPool,
5317  uint32_t currentFrameIndex,
5318  VkDeviceSize size,
5319  VkDeviceSize alignment,
5320  const VmaAllocationCreateInfo& createInfo,
5321  VmaSuballocationType suballocType,
5322  VmaAllocation* pAllocation);
5323 
5324  void Free(
5325  VmaAllocation hAllocation);
5326 
5327  // Adds statistics of this BlockVector to pStats.
5328  void AddStats(VmaStats* pStats);
5329 
5330 #if VMA_STATS_STRING_ENABLED
5331  void PrintDetailedMap(class VmaJsonWriter& json);
5332 #endif
5333 
5334  void MakePoolAllocationsLost(
5335  uint32_t currentFrameIndex,
5336  size_t* pLostAllocationCount);
5337  VkResult CheckCorruption();
5338 
5339  VmaDefragmentator* EnsureDefragmentator(
5340  VmaAllocator hAllocator,
5341  uint32_t currentFrameIndex);
5342 
5343  VkResult Defragment(
5344  VmaDefragmentationStats* pDefragmentationStats,
5345  VkDeviceSize& maxBytesToMove,
5346  uint32_t& maxAllocationsToMove);
5347 
5348  void DestroyDefragmentator();
5349 
5350 private:
5351  friend class VmaDefragmentator;
5352 
5353  const VmaAllocator m_hAllocator;
5354  const uint32_t m_MemoryTypeIndex;
5355  const VkDeviceSize m_PreferredBlockSize;
5356  const size_t m_MinBlockCount;
5357  const size_t m_MaxBlockCount;
5358  const VkDeviceSize m_BufferImageGranularity;
5359  const uint32_t m_FrameInUseCount;
5360  const bool m_IsCustomPool;
5361  const bool m_ExplicitBlockSize;
5362  const uint32_t m_Algorithm;
5363  bool m_HasEmptyBlock;
5364  VMA_MUTEX m_Mutex;
5365  // Incrementally sorted by sumFreeSize, ascending.
5366  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5367  /* There can be at most one block that is completely empty - a
5368  hysteresis to avoid the pessimistic case of alternating creation and destruction
5369  of a VkDeviceMemory. */
5370  VmaDefragmentator* m_pDefragmentator;
5371  uint32_t m_NextBlockId;
5372 
5373  VkDeviceSize CalcMaxBlockSize() const;
5374 
5375  // Finds and removes given block from vector.
5376  void Remove(VmaDeviceMemoryBlock* pBlock);
5377 
5378  // Performs single step in sorting m_Blocks. They may not be fully sorted
5379  // after this call.
5380  void IncrementallySortBlocks();
5381 
5382  // To be used only without CAN_MAKE_OTHER_LOST flag.
5383  VkResult AllocateFromBlock(
5384  VmaDeviceMemoryBlock* pBlock,
5385  VmaPool hCurrentPool,
5386  uint32_t currentFrameIndex,
5387  VkDeviceSize size,
5388  VkDeviceSize alignment,
5389  VmaAllocationCreateFlags allocFlags,
5390  void* pUserData,
5391  VmaSuballocationType suballocType,
5392  uint32_t strategy,
5393  VmaAllocation* pAllocation);
5394 
5395  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5396 };
5397 
5398 struct VmaPool_T
5399 {
5400  VMA_CLASS_NO_COPY(VmaPool_T)
5401 public:
5402  VmaBlockVector m_BlockVector;
5403 
5404  VmaPool_T(
5405  VmaAllocator hAllocator,
5406  const VmaPoolCreateInfo& createInfo,
5407  VkDeviceSize preferredBlockSize);
5408  ~VmaPool_T();
5409 
5410  uint32_t GetId() const { return m_Id; }
5411  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5412 
5413 #if VMA_STATS_STRING_ENABLED
5414  //void PrintDetailedMap(class VmaStringBuilder& sb);
5415 #endif
5416 
5417 private:
5418  uint32_t m_Id;
5419 };
5420 
5421 class VmaDefragmentator
5422 {
5423  VMA_CLASS_NO_COPY(VmaDefragmentator)
5424 private:
5425  const VmaAllocator m_hAllocator;
5426  VmaBlockVector* const m_pBlockVector;
5427  uint32_t m_CurrentFrameIndex;
5428  VkDeviceSize m_BytesMoved;
5429  uint32_t m_AllocationsMoved;
5430 
5431  struct AllocationInfo
5432  {
5433  VmaAllocation m_hAllocation;
5434  VkBool32* m_pChanged;
5435 
5436  AllocationInfo() :
5437  m_hAllocation(VK_NULL_HANDLE),
5438  m_pChanged(VMA_NULL)
5439  {
5440  }
5441  };
5442 
5443  struct AllocationInfoSizeGreater
5444  {
5445  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5446  {
5447  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5448  }
5449  };
5450 
5451  // Used between AddAllocation and Defragment.
5452  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5453 
5454  struct BlockInfo
5455  {
5456  VmaDeviceMemoryBlock* m_pBlock;
5457  bool m_HasNonMovableAllocations;
5458  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5459 
5460  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5461  m_pBlock(VMA_NULL),
5462  m_HasNonMovableAllocations(true),
5463  m_Allocations(pAllocationCallbacks),
5464  m_pMappedDataForDefragmentation(VMA_NULL)
5465  {
5466  }
5467 
5468  void CalcHasNonMovableAllocations()
5469  {
5470  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5471  const size_t defragmentAllocCount = m_Allocations.size();
5472  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5473  }
5474 
5475  void SortAllocationsBySizeDescecnding()
5476  {
5477  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5478  }
5479 
5480  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
5481  void Unmap(VmaAllocator hAllocator);
5482 
5483  private:
5484  // Not null if mapped for defragmentation only, not originally mapped.
5485  void* m_pMappedDataForDefragmentation;
5486  };
5487 
5488  struct BlockPointerLess
5489  {
5490  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5491  {
5492  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5493  }
5494  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5495  {
5496  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5497  }
5498  };
5499 
5500  // 1. Blocks with some non-movable allocations go first.
5501  // 2. Blocks with smaller sumFreeSize go first.
5502  struct BlockInfoCompareMoveDestination
5503  {
5504  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5505  {
5506  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5507  {
5508  return true;
5509  }
5510  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
5511  {
5512  return false;
5513  }
5514  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
5515  {
5516  return true;
5517  }
5518  return false;
5519  }
5520  };
5521 
5522  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
5523  BlockInfoVector m_Blocks;
5524 
5525  VkResult DefragmentRound(
5526  VkDeviceSize maxBytesToMove,
5527  uint32_t maxAllocationsToMove);
5528 
5529  static bool MoveMakesSense(
5530  size_t dstBlockIndex, VkDeviceSize dstOffset,
5531  size_t srcBlockIndex, VkDeviceSize srcOffset);
5532 
5533 public:
5534  VmaDefragmentator(
5535  VmaAllocator hAllocator,
5536  VmaBlockVector* pBlockVector,
5537  uint32_t currentFrameIndex);
5538 
5539  ~VmaDefragmentator();
5540 
5541  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5542  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5543 
5544  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5545 
5546  VkResult Defragment(
5547  VkDeviceSize maxBytesToMove,
5548  uint32_t maxAllocationsToMove);
5549 };
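/*
A hedged sketch of the intended call sequence (derived from the interface above,
not a public API): register candidate allocations, then run one defragmentation
pass under the given limits.

void Defragment_Sketch(VmaDefragmentator& defragmentator, VmaAllocation hAlloc)
{
    VkBool32 changed = VK_FALSE;
    defragmentator.AddAllocation(hAlloc, &changed);
    const VkResult res = defragmentator.Defragment(
        VK_WHOLE_SIZE,  // maxBytesToMove: effectively no byte limit.
        UINT32_MAX);    // maxAllocationsToMove: effectively no count limit.
    // On success, `changed` tells whether hAlloc was moved;
    // GetBytesMoved() / GetAllocationsMoved() report pass totals.
    (void)res;
}
*/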
5550 
5551 #if VMA_RECORDING_ENABLED
5552 
5553 class VmaRecorder
5554 {
5555 public:
5556  VmaRecorder();
5557  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
5558  void WriteConfiguration(
5559  const VkPhysicalDeviceProperties& devProps,
5560  const VkPhysicalDeviceMemoryProperties& memProps,
5561  bool dedicatedAllocationExtensionEnabled);
5562  ~VmaRecorder();
5563 
5564  void RecordCreateAllocator(uint32_t frameIndex);
5565  void RecordDestroyAllocator(uint32_t frameIndex);
5566  void RecordCreatePool(uint32_t frameIndex,
5567  const VmaPoolCreateInfo& createInfo,
5568  VmaPool pool);
5569  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
5570  void RecordAllocateMemory(uint32_t frameIndex,
5571  const VkMemoryRequirements& vkMemReq,
5572  const VmaAllocationCreateInfo& createInfo,
5573  VmaAllocation allocation);
5574  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
5575  const VkMemoryRequirements& vkMemReq,
5576  bool requiresDedicatedAllocation,
5577  bool prefersDedicatedAllocation,
5578  const VmaAllocationCreateInfo& createInfo,
5579  VmaAllocation allocation);
5580  void RecordAllocateMemoryForImage(uint32_t frameIndex,
5581  const VkMemoryRequirements& vkMemReq,
5582  bool requiresDedicatedAllocation,
5583  bool prefersDedicatedAllocation,
5584  const VmaAllocationCreateInfo& createInfo,
5585  VmaAllocation allocation);
5586  void RecordFreeMemory(uint32_t frameIndex,
5587  VmaAllocation allocation);
5588  void RecordSetAllocationUserData(uint32_t frameIndex,
5589  VmaAllocation allocation,
5590  const void* pUserData);
5591  void RecordCreateLostAllocation(uint32_t frameIndex,
5592  VmaAllocation allocation);
5593  void RecordMapMemory(uint32_t frameIndex,
5594  VmaAllocation allocation);
5595  void RecordUnmapMemory(uint32_t frameIndex,
5596  VmaAllocation allocation);
5597  void RecordFlushAllocation(uint32_t frameIndex,
5598  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5599  void RecordInvalidateAllocation(uint32_t frameIndex,
5600  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5601  void RecordCreateBuffer(uint32_t frameIndex,
5602  const VkBufferCreateInfo& bufCreateInfo,
5603  const VmaAllocationCreateInfo& allocCreateInfo,
5604  VmaAllocation allocation);
5605  void RecordCreateImage(uint32_t frameIndex,
5606  const VkImageCreateInfo& imageCreateInfo,
5607  const VmaAllocationCreateInfo& allocCreateInfo,
5608  VmaAllocation allocation);
5609  void RecordDestroyBuffer(uint32_t frameIndex,
5610  VmaAllocation allocation);
5611  void RecordDestroyImage(uint32_t frameIndex,
5612  VmaAllocation allocation);
5613  void RecordTouchAllocation(uint32_t frameIndex,
5614  VmaAllocation allocation);
5615  void RecordGetAllocationInfo(uint32_t frameIndex,
5616  VmaAllocation allocation);
5617  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
5618  VmaPool pool);
5619 
5620 private:
5621  struct CallParams
5622  {
5623  uint32_t threadId;
5624  double time;
5625  };
5626 
5627  class UserDataString
5628  {
5629  public:
5630  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
5631  const char* GetString() const { return m_Str; }
5632 
5633  private:
5634  char m_PtrStr[17];
5635  const char* m_Str;
5636  };
5637 
5638  bool m_UseMutex;
5639  VmaRecordFlags m_Flags;
5640  FILE* m_File;
5641  VMA_MUTEX m_FileMutex;
5642  int64_t m_Freq;
5643  int64_t m_StartCounter;
5644 
5645  void GetBasicParams(CallParams& outParams);
5646  void Flush();
5647 };
5648 
5649 #endif // #if VMA_RECORDING_ENABLED
5650 
5651 // Main allocator object.
5652 struct VmaAllocator_T
5653 {
5654  VMA_CLASS_NO_COPY(VmaAllocator_T)
5655 public:
5656  bool m_UseMutex;
5657  bool m_UseKhrDedicatedAllocation;
5658  VkDevice m_hDevice;
5659  bool m_AllocationCallbacksSpecified;
5660  VkAllocationCallbacks m_AllocationCallbacks;
5661  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5662 
5663  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit is set for that heap.
5664  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5665  VMA_MUTEX m_HeapSizeLimitMutex;
5666 
5667  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5668  VkPhysicalDeviceMemoryProperties m_MemProps;
5669 
5670  // Default pools.
5671  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5672 
5673  // Each vector is sorted by memory (handle value).
5674  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5675  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5676  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5677 
5678  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5679  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5680  ~VmaAllocator_T();
5681 
5682  const VkAllocationCallbacks* GetAllocationCallbacks() const
5683  {
5684  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5685  }
5686  const VmaVulkanFunctions& GetVulkanFunctions() const
5687  {
5688  return m_VulkanFunctions;
5689  }
5690 
5691  VkDeviceSize GetBufferImageGranularity() const
5692  {
5693  return VMA_MAX(
5694  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5695  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5696  }
5697 
5698  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5699  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5700 
5701  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5702  {
5703  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5704  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5705  }
5706  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5707  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5708  {
5709  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5710  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5711  }
5712  // Minimum alignment for all allocations in specific memory type.
5713  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5714  {
5715  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5716  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5717  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5718  }
5719 
5720  bool IsIntegratedGpu() const
5721  {
5722  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5723  }
5724 
5725 #if VMA_RECORDING_ENABLED
5726  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5727 #endif
5728 
5729  void GetBufferMemoryRequirements(
5730  VkBuffer hBuffer,
5731  VkMemoryRequirements& memReq,
5732  bool& requiresDedicatedAllocation,
5733  bool& prefersDedicatedAllocation) const;
5734  void GetImageMemoryRequirements(
5735  VkImage hImage,
5736  VkMemoryRequirements& memReq,
5737  bool& requiresDedicatedAllocation,
5738  bool& prefersDedicatedAllocation) const;
5739 
5740  // Main allocation function.
5741  VkResult AllocateMemory(
5742  const VkMemoryRequirements& vkMemReq,
5743  bool requiresDedicatedAllocation,
5744  bool prefersDedicatedAllocation,
5745  VkBuffer dedicatedBuffer,
5746  VkImage dedicatedImage,
5747  const VmaAllocationCreateInfo& createInfo,
5748  VmaSuballocationType suballocType,
5749  VmaAllocation* pAllocation);
5750 
5751  // Main deallocation function.
5752  void FreeMemory(const VmaAllocation allocation);
5753 
5754  void CalculateStats(VmaStats* pStats);
5755 
5756 #if VMA_STATS_STRING_ENABLED
5757  void PrintDetailedMap(class VmaJsonWriter& json);
5758 #endif
5759 
5760  VkResult Defragment(
5761  VmaAllocation* pAllocations,
5762  size_t allocationCount,
5763  VkBool32* pAllocationsChanged,
5764  const VmaDefragmentationInfo* pDefragmentationInfo,
5765  VmaDefragmentationStats* pDefragmentationStats);
5766 
5767  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5768  bool TouchAllocation(VmaAllocation hAllocation);
5769 
5770  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5771  void DestroyPool(VmaPool pool);
5772  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5773 
5774  void SetCurrentFrameIndex(uint32_t frameIndex);
5775  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5776 
5777  void MakePoolAllocationsLost(
5778  VmaPool hPool,
5779  size_t* pLostAllocationCount);
5780  VkResult CheckPoolCorruption(VmaPool hPool);
5781  VkResult CheckCorruption(uint32_t memoryTypeBits);
5782 
5783  void CreateLostAllocation(VmaAllocation* pAllocation);
5784 
5785  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5786  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5787 
5788  VkResult Map(VmaAllocation hAllocation, void** ppData);
5789  void Unmap(VmaAllocation hAllocation);
5790 
5791  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5792  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5793 
5794  void FlushOrInvalidateAllocation(
5795  VmaAllocation hAllocation,
5796  VkDeviceSize offset, VkDeviceSize size,
5797  VMA_CACHE_OPERATION op);
5798 
5799  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5800 
5801 private:
5802  VkDeviceSize m_PreferredLargeHeapBlockSize;
5803 
5804  VkPhysicalDevice m_PhysicalDevice;
5805  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5806 
5807  VMA_MUTEX m_PoolsMutex;
5808  // Protected by m_PoolsMutex. Sorted by pointer value.
5809  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5810  uint32_t m_NextPoolId;
5811 
5812  VmaVulkanFunctions m_VulkanFunctions;
5813 
5814 #if VMA_RECORDING_ENABLED
5815  VmaRecorder* m_pRecorder;
5816 #endif
5817 
5818  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5819 
5820  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5821 
5822  VkResult AllocateMemoryOfType(
5823  VkDeviceSize size,
5824  VkDeviceSize alignment,
5825  bool dedicatedAllocation,
5826  VkBuffer dedicatedBuffer,
5827  VkImage dedicatedImage,
5828  const VmaAllocationCreateInfo& createInfo,
5829  uint32_t memTypeIndex,
5830  VmaSuballocationType suballocType,
5831  VmaAllocation* pAllocation);
5832 
5833  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
5834  VkResult AllocateDedicatedMemory(
5835  VkDeviceSize size,
5836  VmaSuballocationType suballocType,
5837  uint32_t memTypeIndex,
5838  bool map,
5839  bool isUserDataString,
5840  void* pUserData,
5841  VkBuffer dedicatedBuffer,
5842  VkImage dedicatedImage,
5843  VmaAllocation* pAllocation);
5844 
5845  // Frees dedicated memory of the given allocation: unregisters it and frees its VkDeviceMemory.
5846  void FreeDedicatedMemory(VmaAllocation allocation);
5847 };
5848 
5850 // Memory allocation #2 after VmaAllocator_T definition
5851 
5852 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5853 {
5854  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5855 }
5856 
5857 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5858 {
5859  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5860 }
5861 
5862 template<typename T>
5863 static T* VmaAllocate(VmaAllocator hAllocator)
5864 {
5865  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5866 }
5867 
5868 template<typename T>
5869 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5870 {
5871  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5872 }
5873 
5874 template<typename T>
5875 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5876 {
5877  if(ptr != VMA_NULL)
5878  {
5879  ptr->~T();
5880  VmaFree(hAllocator, ptr);
5881  }
5882 }
5883 
5884 template<typename T>
5885 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5886 {
5887  if(ptr != VMA_NULL)
5888  {
5889  for(size_t i = count; i--; )
5890  ptr[i].~T();
5891  VmaFree(hAllocator, ptr);
5892  }
5893 }
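/*
These helpers pair raw VmaMalloc/VmaFree with explicit constructor/destructor
calls. A hedged sketch of the construction side they imply (vma_new_array is used
later in this file; the exact macro definitions are assumed here):

#include <new>
T* pMem = VmaAllocate<T>(hAllocator);  // Raw, uninitialized storage.
T* pObj = new(pMem) T();               // Placement new constructs in place.
// ... use pObj ...
vma_delete(hAllocator, pObj);          // Calls ~T(), then VmaFree().
*/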
5894 
5896 // VmaStringBuilder
5897 
5898 #if VMA_STATS_STRING_ENABLED
5899 
5900 class VmaStringBuilder
5901 {
5902 public:
5903  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
5904  size_t GetLength() const { return m_Data.size(); }
5905  const char* GetData() const { return m_Data.data(); }
5906 
5907  void Add(char ch) { m_Data.push_back(ch); }
5908  void Add(const char* pStr);
5909  void AddNewLine() { Add('\n'); }
5910  void AddNumber(uint32_t num);
5911  void AddNumber(uint64_t num);
5912  void AddPointer(const void* ptr);
5913 
5914 private:
5915  VmaVector< char, VmaStlAllocator<char> > m_Data;
5916 };
5917 
5918 void VmaStringBuilder::Add(const char* pStr)
5919 {
5920  const size_t strLen = strlen(pStr);
5921  if(strLen > 0)
5922  {
5923  const size_t oldCount = m_Data.size();
5924  m_Data.resize(oldCount + strLen);
5925  memcpy(m_Data.data() + oldCount, pStr, strLen);
5926  }
5927 }
5928 
5929 void VmaStringBuilder::AddNumber(uint32_t num)
5930 {
5931  char buf[11];
5932  VmaUint32ToStr(buf, sizeof(buf), num);
5933  Add(buf);
5934 }
5935 
5936 void VmaStringBuilder::AddNumber(uint64_t num)
5937 {
5938  char buf[21];
5939  VmaUint64ToStr(buf, sizeof(buf), num);
5940  Add(buf);
5941 }
5942 
5943 void VmaStringBuilder::AddPointer(const void* ptr)
5944 {
5945  char buf[21];
5946  VmaPtrToStr(buf, sizeof(buf), ptr);
5947  Add(buf);
5948 }
5949 
5950 #endif // #if VMA_STATS_STRING_ENABLED
5951 
5953 // VmaJsonWriter
5954 
5955 #if VMA_STATS_STRING_ENABLED
5956 
5957 class VmaJsonWriter
5958 {
5959  VMA_CLASS_NO_COPY(VmaJsonWriter)
5960 public:
5961  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
5962  ~VmaJsonWriter();
5963 
5964  void BeginObject(bool singleLine = false);
5965  void EndObject();
5966 
5967  void BeginArray(bool singleLine = false);
5968  void EndArray();
5969 
5970  void WriteString(const char* pStr);
5971  void BeginString(const char* pStr = VMA_NULL);
5972  void ContinueString(const char* pStr);
5973  void ContinueString(uint32_t n);
5974  void ContinueString(uint64_t n);
5975  void ContinueString_Pointer(const void* ptr);
5976  void EndString(const char* pStr = VMA_NULL);
5977 
5978  void WriteNumber(uint32_t n);
5979  void WriteNumber(uint64_t n);
5980  void WriteBool(bool b);
5981  void WriteNull();
5982 
5983 private:
5984  static const char* const INDENT;
5985 
5986  enum COLLECTION_TYPE
5987  {
5988  COLLECTION_TYPE_OBJECT,
5989  COLLECTION_TYPE_ARRAY,
5990  };
5991  struct StackItem
5992  {
5993  COLLECTION_TYPE type;
5994  uint32_t valueCount;
5995  bool singleLineMode;
5996  };
5997 
5998  VmaStringBuilder& m_SB;
5999  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6000  bool m_InsideString;
6001 
6002  void BeginValue(bool isString);
6003  void WriteIndent(bool oneLess = false);
6004 };
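/*
A hedged usage sketch: inside an object, BeginValue() enforces alternating string
keys and values, emitting ": " between a key and its value and ", " between
pairs. Assuming a VmaStringBuilder `sb` built as shown earlier:

VmaJsonWriter json(pAllocationCallbacks, sb);
json.BeginObject();
json.WriteString("Blocks"); // Key.
json.WriteNumber(2u);       // Value.
json.EndObject();
// sb now holds, across three lines with indentation:
// {
//   "Blocks": 2
// }
*/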
6005 
6006 const char* const VmaJsonWriter::INDENT = " ";
6007 
6008 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6009  m_SB(sb),
6010  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6011  m_InsideString(false)
6012 {
6013 }
6014 
6015 VmaJsonWriter::~VmaJsonWriter()
6016 {
6017  VMA_ASSERT(!m_InsideString);
6018  VMA_ASSERT(m_Stack.empty());
6019 }
6020 
6021 void VmaJsonWriter::BeginObject(bool singleLine)
6022 {
6023  VMA_ASSERT(!m_InsideString);
6024 
6025  BeginValue(false);
6026  m_SB.Add('{');
6027 
6028  StackItem item;
6029  item.type = COLLECTION_TYPE_OBJECT;
6030  item.valueCount = 0;
6031  item.singleLineMode = singleLine;
6032  m_Stack.push_back(item);
6033 }
6034 
6035 void VmaJsonWriter::EndObject()
6036 {
6037  VMA_ASSERT(!m_InsideString);
6038 
6039  WriteIndent(true);
6040  m_SB.Add('}');
6041 
6042  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6043  m_Stack.pop_back();
6044 }
6045 
6046 void VmaJsonWriter::BeginArray(bool singleLine)
6047 {
6048  VMA_ASSERT(!m_InsideString);
6049 
6050  BeginValue(false);
6051  m_SB.Add('[');
6052 
6053  StackItem item;
6054  item.type = COLLECTION_TYPE_ARRAY;
6055  item.valueCount = 0;
6056  item.singleLineMode = singleLine;
6057  m_Stack.push_back(item);
6058 }
6059 
6060 void VmaJsonWriter::EndArray()
6061 {
6062  VMA_ASSERT(!m_InsideString);
6063 
6064  WriteIndent(true);
6065  m_SB.Add(']');
6066 
6067  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6068  m_Stack.pop_back();
6069 }
6070 
6071 void VmaJsonWriter::WriteString(const char* pStr)
6072 {
6073  BeginString(pStr);
6074  EndString();
6075 }
6076 
6077 void VmaJsonWriter::BeginString(const char* pStr)
6078 {
6079  VMA_ASSERT(!m_InsideString);
6080 
6081  BeginValue(true);
6082  m_SB.Add('"');
6083  m_InsideString = true;
6084  if(pStr != VMA_NULL && pStr[0] != '\0')
6085  {
6086  ContinueString(pStr);
6087  }
6088 }
6089 
6090 void VmaJsonWriter::ContinueString(const char* pStr)
6091 {
6092  VMA_ASSERT(m_InsideString);
6093 
6094  const size_t strLen = strlen(pStr);
6095  for(size_t i = 0; i < strLen; ++i)
6096  {
6097  char ch = pStr[i];
6098  if(ch == '\\')
6099  {
6100  m_SB.Add("\\\\");
6101  }
6102  else if(ch == '"')
6103  {
6104  m_SB.Add("\\\"");
6105  }
6106  else if(ch >= 32)
6107  {
6108  m_SB.Add(ch);
6109  }
6110  else switch(ch)
6111  {
6112  case '\b':
6113  m_SB.Add("\\b");
6114  break;
6115  case '\f':
6116  m_SB.Add("\\f");
6117  break;
6118  case '\n':
6119  m_SB.Add("\\n");
6120  break;
6121  case '\r':
6122  m_SB.Add("\\r");
6123  break;
6124  case '\t':
6125  m_SB.Add("\\t");
6126  break;
6127  default:
6128  VMA_ASSERT(0 && "Character not currently supported.");
6129  break;
6130  }
6131  }
6132 }
6133 
6134 void VmaJsonWriter::ContinueString(uint32_t n)
6135 {
6136  VMA_ASSERT(m_InsideString);
6137  m_SB.AddNumber(n);
6138 }
6139 
6140 void VmaJsonWriter::ContinueString(uint64_t n)
6141 {
6142  VMA_ASSERT(m_InsideString);
6143  m_SB.AddNumber(n);
6144 }
6145 
6146 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6147 {
6148  VMA_ASSERT(m_InsideString);
6149  m_SB.AddPointer(ptr);
6150 }
6151 
6152 void VmaJsonWriter::EndString(const char* pStr)
6153 {
6154  VMA_ASSERT(m_InsideString);
6155  if(pStr != VMA_NULL && pStr[0] != '\0')
6156  {
6157  ContinueString(pStr);
6158  }
6159  m_SB.Add('"');
6160  m_InsideString = false;
6161 }
6162 
6163 void VmaJsonWriter::WriteNumber(uint32_t n)
6164 {
6165  VMA_ASSERT(!m_InsideString);
6166  BeginValue(false);
6167  m_SB.AddNumber(n);
6168 }
6169 
6170 void VmaJsonWriter::WriteNumber(uint64_t n)
6171 {
6172  VMA_ASSERT(!m_InsideString);
6173  BeginValue(false);
6174  m_SB.AddNumber(n);
6175 }
6176 
6177 void VmaJsonWriter::WriteBool(bool b)
6178 {
6179  VMA_ASSERT(!m_InsideString);
6180  BeginValue(false);
6181  m_SB.Add(b ? "true" : "false");
6182 }
6183 
6184 void VmaJsonWriter::WriteNull()
6185 {
6186  VMA_ASSERT(!m_InsideString);
6187  BeginValue(false);
6188  m_SB.Add("null");
6189 }
6190 
6191 void VmaJsonWriter::BeginValue(bool isString)
6192 {
6193  if(!m_Stack.empty())
6194  {
6195  StackItem& currItem = m_Stack.back();
6196  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6197  currItem.valueCount % 2 == 0)
6198  {
6199  VMA_ASSERT(isString);
6200  }
6201 
6202  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6203  currItem.valueCount % 2 != 0)
6204  {
6205  m_SB.Add(": ");
6206  }
6207  else if(currItem.valueCount > 0)
6208  {
6209  m_SB.Add(", ");
6210  WriteIndent();
6211  }
6212  else
6213  {
6214  WriteIndent();
6215  }
6216  ++currItem.valueCount;
6217  }
6218 }
6219 
6220 void VmaJsonWriter::WriteIndent(bool oneLess)
6221 {
6222  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6223  {
6224  m_SB.AddNewLine();
6225 
6226  size_t count = m_Stack.size();
6227  if(count > 0 && oneLess)
6228  {
6229  --count;
6230  }
6231  for(size_t i = 0; i < count; ++i)
6232  {
6233  m_SB.Add(INDENT);
6234  }
6235  }
6236 }
6237 
6238 #endif // #if VMA_STATS_STRING_ENABLED
6239 
6241 
6242 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6243 {
6244  if(IsUserDataString())
6245  {
6246  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6247 
6248  FreeUserDataString(hAllocator);
6249 
6250  if(pUserData != VMA_NULL)
6251  {
6252  const char* const newStrSrc = (char*)pUserData;
6253  const size_t newStrLen = strlen(newStrSrc);
6254  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6255  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6256  m_pUserData = newStrDst;
6257  }
6258  }
6259  else
6260  {
6261  m_pUserData = pUserData;
6262  }
6263 }
6264 
6265 void VmaAllocation_T::ChangeBlockAllocation(
6266  VmaAllocator hAllocator,
6267  VmaDeviceMemoryBlock* block,
6268  VkDeviceSize offset)
6269 {
6270  VMA_ASSERT(block != VMA_NULL);
6271  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6272 
6273  // Move mapping reference counter from old block to new block.
6274  if(block != m_BlockAllocation.m_Block)
6275  {
6276  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6277  if(IsPersistentMap())
6278  ++mapRefCount;
6279  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6280  block->Map(hAllocator, mapRefCount, VMA_NULL);
6281  }
6282 
6283  m_BlockAllocation.m_Block = block;
6284  m_BlockAllocation.m_Offset = offset;
6285 }
6286 
6287 VkDeviceSize VmaAllocation_T::GetOffset() const
6288 {
6289  switch(m_Type)
6290  {
6291  case ALLOCATION_TYPE_BLOCK:
6292  return m_BlockAllocation.m_Offset;
6293  case ALLOCATION_TYPE_DEDICATED:
6294  return 0;
6295  default:
6296  VMA_ASSERT(0);
6297  return 0;
6298  }
6299 }
6300 
6301 VkDeviceMemory VmaAllocation_T::GetMemory() const
6302 {
6303  switch(m_Type)
6304  {
6305  case ALLOCATION_TYPE_BLOCK:
6306  return m_BlockAllocation.m_Block->GetDeviceMemory();
6307  case ALLOCATION_TYPE_DEDICATED:
6308  return m_DedicatedAllocation.m_hMemory;
6309  default:
6310  VMA_ASSERT(0);
6311  return VK_NULL_HANDLE;
6312  }
6313 }
6314 
6315 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6316 {
6317  switch(m_Type)
6318  {
6319  case ALLOCATION_TYPE_BLOCK:
6320  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6321  case ALLOCATION_TYPE_DEDICATED:
6322  return m_DedicatedAllocation.m_MemoryTypeIndex;
6323  default:
6324  VMA_ASSERT(0);
6325  return UINT32_MAX;
6326  }
6327 }
6328 
6329 void* VmaAllocation_T::GetMappedData() const
6330 {
6331  switch(m_Type)
6332  {
6333  case ALLOCATION_TYPE_BLOCK:
6334  if(m_MapCount != 0)
6335  {
6336  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6337  VMA_ASSERT(pBlockData != VMA_NULL);
6338  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6339  }
6340  else
6341  {
6342  return VMA_NULL;
6343  }
6344  break;
6345  case ALLOCATION_TYPE_DEDICATED:
6346  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6347  return m_DedicatedAllocation.m_pMappedData;
6348  default:
6349  VMA_ASSERT(0);
6350  return VMA_NULL;
6351  }
6352 }
6353 
6354 bool VmaAllocation_T::CanBecomeLost() const
6355 {
6356  switch(m_Type)
6357  {
6358  case ALLOCATION_TYPE_BLOCK:
6359  return m_BlockAllocation.m_CanBecomeLost;
6360  case ALLOCATION_TYPE_DEDICATED:
6361  return false;
6362  default:
6363  VMA_ASSERT(0);
6364  return false;
6365  }
6366 }
6367 
6368 VmaPool VmaAllocation_T::GetPool() const
6369 {
6370  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6371  return m_BlockAllocation.m_hPool;
6372 }
6373 
6374 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6375 {
6376  VMA_ASSERT(CanBecomeLost());
6377 
6378  /*
6379  Warning: This is a carefully designed algorithm.
6380  Do not modify unless you really know what you're doing :)
6381  */
6382  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
6383  for(;;)
6384  {
6385  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6386  {
6387  VMA_ASSERT(0);
6388  return false;
6389  }
6390  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6391  {
6392  return false;
6393  }
6394  else // Last use time earlier than current time.
6395  {
6396  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6397  {
6398  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6399  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
6400  return true;
6401  }
6402  }
6403  }
6404 }
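/*
A worked example of the frame-window check above, with illustrative numbers:
for frameInUseCount = 2 and an allocation last used in frame 10, the middle branch
keeps returning false through frame 12 (since 10 + 2 >= 12), so the allocation can
only become lost once currentFrameIndex reaches 13 - frames 10..12 may still be in
use by the GPU.
*/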
6405 
6406 #if VMA_STATS_STRING_ENABLED
6407 
6408 // These names correspond to values of enum VmaSuballocationType.
6409 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6410  "FREE",
6411  "UNKNOWN",
6412  "BUFFER",
6413  "IMAGE_UNKNOWN",
6414  "IMAGE_LINEAR",
6415  "IMAGE_OPTIMAL",
6416 };
6417 
6418 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6419 {
6420  json.WriteString("Type");
6421  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6422 
6423  json.WriteString("Size");
6424  json.WriteNumber(m_Size);
6425 
6426  if(m_pUserData != VMA_NULL)
6427  {
6428  json.WriteString("UserData");
6429  if(IsUserDataString())
6430  {
6431  json.WriteString((const char*)m_pUserData);
6432  }
6433  else
6434  {
6435  json.BeginString();
6436  json.ContinueString_Pointer(m_pUserData);
6437  json.EndString();
6438  }
6439  }
6440 
6441  json.WriteString("CreationFrameIndex");
6442  json.WriteNumber(m_CreationFrameIndex);
6443 
6444  json.WriteString("LastUseFrameIndex");
6445  json.WriteNumber(GetLastUseFrameIndex());
6446 
6447  if(m_BufferImageUsage != 0)
6448  {
6449  json.WriteString("Usage");
6450  json.WriteNumber(m_BufferImageUsage);
6451  }
6452 }
6453 
6454 #endif
6455 
6456 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6457 {
6458  VMA_ASSERT(IsUserDataString());
6459  if(m_pUserData != VMA_NULL)
6460  {
6461  char* const oldStr = (char*)m_pUserData;
6462  const size_t oldStrLen = strlen(oldStr);
6463  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6464  m_pUserData = VMA_NULL;
6465  }
6466 }
6467 
6468 void VmaAllocation_T::BlockAllocMap()
6469 {
6470  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6471 
6472  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6473  {
6474  ++m_MapCount;
6475  }
6476  else
6477  {
6478  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6479  }
6480 }
6481 
6482 void VmaAllocation_T::BlockAllocUnmap()
6483 {
6484  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6485 
6486  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6487  {
6488  --m_MapCount;
6489  }
6490  else
6491  {
6492  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6493  }
6494 }
6495 
6496 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6497 {
6498  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6499 
6500  if(m_MapCount != 0)
6501  {
6502  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6503  {
6504  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6505  *ppData = m_DedicatedAllocation.m_pMappedData;
6506  ++m_MapCount;
6507  return VK_SUCCESS;
6508  }
6509  else
6510  {
6511  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6512  return VK_ERROR_MEMORY_MAP_FAILED;
6513  }
6514  }
6515  else
6516  {
6517  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6518  hAllocator->m_hDevice,
6519  m_DedicatedAllocation.m_hMemory,
6520  0, // offset
6521  VK_WHOLE_SIZE,
6522  0, // flags
6523  ppData);
6524  if(result == VK_SUCCESS)
6525  {
6526  m_DedicatedAllocation.m_pMappedData = *ppData;
6527  m_MapCount = 1;
6528  }
6529  return result;
6530  }
6531 }
6532 
6533 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6534 {
6535  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6536 
6537  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6538  {
6539  --m_MapCount;
6540  if(m_MapCount == 0)
6541  {
6542  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6543  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6544  hAllocator->m_hDevice,
6545  m_DedicatedAllocation.m_hMemory);
6546  }
6547  }
6548  else
6549  {
6550  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6551  }
6552 }
6553 
6554 #if VMA_STATS_STRING_ENABLED
6555 
6556 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6557 {
6558  json.BeginObject();
6559 
6560  json.WriteString("Blocks");
6561  json.WriteNumber(stat.blockCount);
6562 
6563  json.WriteString("Allocations");
6564  json.WriteNumber(stat.allocationCount);
6565 
6566  json.WriteString("UnusedRanges");
6567  json.WriteNumber(stat.unusedRangeCount);
6568 
6569  json.WriteString("UsedBytes");
6570  json.WriteNumber(stat.usedBytes);
6571 
6572  json.WriteString("UnusedBytes");
6573  json.WriteNumber(stat.unusedBytes);
6574 
6575  if(stat.allocationCount > 1)
6576  {
6577  json.WriteString("AllocationSize");
6578  json.BeginObject(true);
6579  json.WriteString("Min");
6580  json.WriteNumber(stat.allocationSizeMin);
6581  json.WriteString("Avg");
6582  json.WriteNumber(stat.allocationSizeAvg);
6583  json.WriteString("Max");
6584  json.WriteNumber(stat.allocationSizeMax);
6585  json.EndObject();
6586  }
6587 
6588  if(stat.unusedRangeCount > 1)
6589  {
6590  json.WriteString("UnusedRangeSize");
6591  json.BeginObject(true);
6592  json.WriteString("Min");
6593  json.WriteNumber(stat.unusedRangeSizeMin);
6594  json.WriteString("Avg");
6595  json.WriteNumber(stat.unusedRangeSizeAvg);
6596  json.WriteString("Max");
6597  json.WriteNumber(stat.unusedRangeSizeMax);
6598  json.EndObject();
6599  }
6600 
6601  json.EndObject();
6602 }
6603 
6604 #endif // #if VMA_STATS_STRING_ENABLED
6605 
6606 struct VmaSuballocationItemSizeLess
6607 {
6608  bool operator()(
6609  const VmaSuballocationList::iterator lhs,
6610  const VmaSuballocationList::iterator rhs) const
6611  {
6612  return lhs->size < rhs->size;
6613  }
6614  bool operator()(
6615  const VmaSuballocationList::iterator lhs,
6616  VkDeviceSize rhsSize) const
6617  {
6618  return lhs->size < rhsSize;
6619  }
6620 };
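/*
A hedged sketch of how the second operator() overload enables best-fit lookup over
m_FreeSuballocationsBySize (sorted ascending by size) with a lower_bound-style
search by raw size. Illustration only; `bySize` is a hypothetical reference to
that vector, and callers must handle the not-found (end) case:

#include <algorithm>

VmaSuballocationList::iterator FindBestFit_Sketch(
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator> >& bySize,
    VkDeviceSize allocSize)
{
    // First free range whose size is >= allocSize.
    return *std::lower_bound(bySize.begin(), bySize.end(), allocSize,
        VmaSuballocationItemSizeLess());
}
*/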
6621 
6622 
6624 // class VmaBlockMetadata
6625 
6626 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
6627  m_Size(0),
6628  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
6629 {
6630 }
6631 
6632 #if VMA_STATS_STRING_ENABLED
6633 
6634 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6635  VkDeviceSize unusedBytes,
6636  size_t allocationCount,
6637  size_t unusedRangeCount) const
6638 {
6639  json.BeginObject();
6640 
6641  json.WriteString("TotalBytes");
6642  json.WriteNumber(GetSize());
6643 
6644  json.WriteString("UnusedBytes");
6645  json.WriteNumber(unusedBytes);
6646 
6647  json.WriteString("Allocations");
6648  json.WriteNumber((uint64_t)allocationCount);
6649 
6650  json.WriteString("UnusedRanges");
6651  json.WriteNumber((uint64_t)unusedRangeCount);
6652 
6653  json.WriteString("Suballocations");
6654  json.BeginArray();
6655 }
6656 
6657 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6658  VkDeviceSize offset,
6659  VmaAllocation hAllocation) const
6660 {
6661  json.BeginObject(true);
6662 
6663  json.WriteString("Offset");
6664  json.WriteNumber(offset);
6665 
6666  hAllocation->PrintParameters(json);
6667 
6668  json.EndObject();
6669 }
6670 
6671 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6672  VkDeviceSize offset,
6673  VkDeviceSize size) const
6674 {
6675  json.BeginObject(true);
6676 
6677  json.WriteString("Offset");
6678  json.WriteNumber(offset);
6679 
6680  json.WriteString("Type");
6681  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6682 
6683  json.WriteString("Size");
6684  json.WriteNumber(size);
6685 
6686  json.EndObject();
6687 }
6688 
6689 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6690 {
6691  json.EndArray();
6692  json.EndObject();
6693 }
6694 
6695 #endif // #if VMA_STATS_STRING_ENABLED
6696 
6697 ////////////////////////////////////////////////////////////////////////////////
6698 // class VmaBlockMetadata_Generic
6699 
6700 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6701  VmaBlockMetadata(hAllocator),
6702  m_FreeCount(0),
6703  m_SumFreeSize(0),
6704  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6705  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6706 {
6707 }
6708 
6709 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6710 {
6711 }
6712 
6713 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6714 {
6715  VmaBlockMetadata::Init(size);
6716 
6717  m_FreeCount = 1;
6718  m_SumFreeSize = size;
6719 
6720  VmaSuballocation suballoc = {};
6721  suballoc.offset = 0;
6722  suballoc.size = size;
6723  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6724  suballoc.hAllocation = VK_NULL_HANDLE;
6725 
6726  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6727  m_Suballocations.push_back(suballoc);
6728  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6729  --suballocItem;
6730  m_FreeSuballocationsBySize.push_back(suballocItem);
6731 }
6732 
6733 bool VmaBlockMetadata_Generic::Validate() const
6734 {
6735  VMA_VALIDATE(!m_Suballocations.empty());
6736 
6737  // Expected offset of new suballocation as calculated from previous ones.
6738  VkDeviceSize calculatedOffset = 0;
6739  // Expected number of free suballocations as calculated from traversing their list.
6740  uint32_t calculatedFreeCount = 0;
6741  // Expected sum size of free suballocations as calculated from traversing their list.
6742  VkDeviceSize calculatedSumFreeSize = 0;
6743  // Expected number of free suballocations that should be registered in
6744  // m_FreeSuballocationsBySize calculated from traversing their list.
6745  size_t freeSuballocationsToRegister = 0;
6746  // True if previous visited suballocation was free.
6747  bool prevFree = false;
6748 
6749  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6750  suballocItem != m_Suballocations.cend();
6751  ++suballocItem)
6752  {
6753  const VmaSuballocation& subAlloc = *suballocItem;
6754 
6755  // Actual offset of this suballocation doesn't match expected one.
6756  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6757 
6758  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6759  // Two adjacent free suballocations are invalid. They should be merged.
6760  VMA_VALIDATE(!prevFree || !currFree);
6761 
6762  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
6763 
6764  if(currFree)
6765  {
6766  calculatedSumFreeSize += subAlloc.size;
6767  ++calculatedFreeCount;
6768  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6769  {
6770  ++freeSuballocationsToRegister;
6771  }
6772 
6773  // Margin required between allocations - every free range must be at least that large.
6774  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
6775  }
6776  else
6777  {
6778  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
6779  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
6780 
6781  // Margin required between allocations - previous allocation must be free.
6782  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
6783  }
6784 
6785  calculatedOffset += subAlloc.size;
6786  prevFree = currFree;
6787  }
6788 
6789  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6790  // match expected one.
6791  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6792 
6793  VkDeviceSize lastSize = 0;
6794  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6795  {
6796  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6797 
6798  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6799  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6800  // They must be sorted by size ascending.
6801  VMA_VALIDATE(suballocItem->size >= lastSize);
6802 
6803  lastSize = suballocItem->size;
6804  }
6805 
6806  // Check if totals match calculated values.
6807  VMA_VALIDATE(ValidateFreeSuballocationList());
6808  VMA_VALIDATE(calculatedOffset == GetSize());
6809  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6810  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6811 
6812  return true;
6813 }
6814 
6815 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6816 {
6817  if(!m_FreeSuballocationsBySize.empty())
6818  {
6819  return m_FreeSuballocationsBySize.back()->size;
6820  }
6821  else
6822  {
6823  return 0;
6824  }
6825 }
6826 
6827 bool VmaBlockMetadata_Generic::IsEmpty() const
6828 {
6829  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6830 }
6831 
6832 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6833 {
6834  outInfo.blockCount = 1;
6835 
6836  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6837  outInfo.allocationCount = rangeCount - m_FreeCount;
6838  outInfo.unusedRangeCount = m_FreeCount;
6839 
6840  outInfo.unusedBytes = m_SumFreeSize;
6841  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6842 
6843  outInfo.allocationSizeMin = UINT64_MAX;
6844  outInfo.allocationSizeMax = 0;
6845  outInfo.unusedRangeSizeMin = UINT64_MAX;
6846  outInfo.unusedRangeSizeMax = 0;
6847 
6848  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6849  suballocItem != m_Suballocations.cend();
6850  ++suballocItem)
6851  {
6852  const VmaSuballocation& suballoc = *suballocItem;
6853  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6854  {
6855  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6856  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6857  }
6858  else
6859  {
6860  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6861  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6862  }
6863  }
6864 }
6865 
6866 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6867 {
6868  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6869 
6870  inoutStats.size += GetSize();
6871  inoutStats.unusedSize += m_SumFreeSize;
6872  inoutStats.allocationCount += rangeCount - m_FreeCount;
6873  inoutStats.unusedRangeCount += m_FreeCount;
6874  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6875 }
6876 
6877 #if VMA_STATS_STRING_ENABLED
6878 
6879 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6880 {
6881  PrintDetailedMap_Begin(json,
6882  m_SumFreeSize, // unusedBytes
6883  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6884  m_FreeCount); // unusedRangeCount
6885 
6886  size_t i = 0;
6887  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6888  suballocItem != m_Suballocations.cend();
6889  ++suballocItem, ++i)
6890  {
6891  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6892  {
6893  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6894  }
6895  else
6896  {
6897  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6898  }
6899  }
6900 
6901  PrintDetailedMap_End(json);
6902 }
6903 
6904 #endif // #if VMA_STATS_STRING_ENABLED
6905 
6906 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6907  uint32_t currentFrameIndex,
6908  uint32_t frameInUseCount,
6909  VkDeviceSize bufferImageGranularity,
6910  VkDeviceSize allocSize,
6911  VkDeviceSize allocAlignment,
6912  bool upperAddress,
6913  VmaSuballocationType allocType,
6914  bool canMakeOtherLost,
6915  uint32_t strategy,
6916  VmaAllocationRequest* pAllocationRequest)
6917 {
6918  VMA_ASSERT(allocSize > 0);
6919  VMA_ASSERT(!upperAddress);
6920  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6921  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6922  VMA_HEAVY_ASSERT(Validate());
6923 
6924  // There is not enough total free space in this block to fulfill the request: Early return.
6925  if(canMakeOtherLost == false &&
6926  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6927  {
6928  return false;
6929  }
6930 
6931  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
6932  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6933  if(freeSuballocCount > 0)
6934  {
6935  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
6936  {
6937  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
6938  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6939  m_FreeSuballocationsBySize.data(),
6940  m_FreeSuballocationsBySize.data() + freeSuballocCount,
6941  allocSize + 2 * VMA_DEBUG_MARGIN,
6942  VmaSuballocationItemSizeLess());
6943  size_t index = it - m_FreeSuballocationsBySize.data();
6944  for(; index < freeSuballocCount; ++index)
6945  {
6946  if(CheckAllocation(
6947  currentFrameIndex,
6948  frameInUseCount,
6949  bufferImageGranularity,
6950  allocSize,
6951  allocAlignment,
6952  allocType,
6953  m_FreeSuballocationsBySize[index],
6954  false, // canMakeOtherLost
6955  &pAllocationRequest->offset,
6956  &pAllocationRequest->itemsToMakeLostCount,
6957  &pAllocationRequest->sumFreeSize,
6958  &pAllocationRequest->sumItemSize))
6959  {
6960  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
6961  return true;
6962  }
6963  }
6964  }
6965  else // WORST_FIT, FIRST_FIT
6966  {
6967  // Search starting from biggest suballocations.
6968  for(size_t index = freeSuballocCount; index--; )
6969  {
6970  if(CheckAllocation(
6971  currentFrameIndex,
6972  frameInUseCount,
6973  bufferImageGranularity,
6974  allocSize,
6975  allocAlignment,
6976  allocType,
6977  m_FreeSuballocationsBySize[index],
6978  false, // canMakeOtherLost
6979  &pAllocationRequest->offset,
6980  &pAllocationRequest->itemsToMakeLostCount,
6981  &pAllocationRequest->sumFreeSize,
6982  &pAllocationRequest->sumItemSize))
6983  {
6984  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
6985  return true;
6986  }
6987  }
6988  }
6989  }
6990 
6991  if(canMakeOtherLost)
6992  {
6993  // Brute-force algorithm. TODO: Come up with something better.
6994 
6995  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
6996  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
6997 
6998  VmaAllocationRequest tmpAllocRequest = {};
6999  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7000  suballocIt != m_Suballocations.end();
7001  ++suballocIt)
7002  {
7003  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7004  suballocIt->hAllocation->CanBecomeLost())
7005  {
7006  if(CheckAllocation(
7007  currentFrameIndex,
7008  frameInUseCount,
7009  bufferImageGranularity,
7010  allocSize,
7011  allocAlignment,
7012  allocType,
7013  suballocIt,
7014  canMakeOtherLost,
7015  &tmpAllocRequest.offset,
7016  &tmpAllocRequest.itemsToMakeLostCount,
7017  &tmpAllocRequest.sumFreeSize,
7018  &tmpAllocRequest.sumItemSize))
7019  {
7020  tmpAllocRequest.item = suballocIt;
7021 
7022  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7023  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7024  {
7025  *pAllocationRequest = tmpAllocRequest;
7026  }
7027  }
7028  }
7029  }
7030 
7031  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7032  {
7033  return true;
7034  }
7035  }
7036 
7037  return false;
7038 }
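// The BEST_FIT branch above pairs a binary search (first free range whose raw
// size is sufficient) with a forward linear scan, because a range that is big
// enough can still fail the alignment or buffer/image-granularity checks in
// CheckAllocation. A condensed sketch of that pattern under simplified,
// assumed types (CanPlace stands in for CheckAllocation):
//
//   #include <algorithm>
//   #include <cstdint>
//   #include <vector>
//
//   struct FreeRange { uint64_t offset, size; };  // sorted by size ascending
//
//   static bool CanPlace(const FreeRange& r, uint64_t size, uint64_t align)
//   {
//       const uint64_t aligned = (r.offset + align - 1) / align * align;
//       return aligned + size <= r.offset + r.size;
//   }
//
//   static const FreeRange* FindBestFit(const std::vector<FreeRange>& bySize,
//                                       uint64_t size, uint64_t align)
//   {
//       auto it = std::lower_bound(bySize.begin(), bySize.end(), size,
//           [](const FreeRange& r, uint64_t s) { return r.size < s; });
//       for(; it != bySize.end(); ++it)  // smallest viable candidate first
//           if(CanPlace(*it, size, align))
//               return &*it;
//       return nullptr;                  // no fit in this block
//   }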
7039 
7040 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7041  uint32_t currentFrameIndex,
7042  uint32_t frameInUseCount,
7043  VmaAllocationRequest* pAllocationRequest)
7044 {
7045  while(pAllocationRequest->itemsToMakeLostCount > 0)
7046  {
7047  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7048  {
7049  ++pAllocationRequest->item;
7050  }
7051  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7052  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7053  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7054  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7055  {
7056  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7057  --pAllocationRequest->itemsToMakeLostCount;
7058  }
7059  else
7060  {
7061  return false;
7062  }
7063  }
7064 
7065  VMA_HEAVY_ASSERT(Validate());
7066  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7067  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7068 
7069  return true;
7070 }
7071 
7072 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7073 {
7074  uint32_t lostAllocationCount = 0;
7075  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7076  it != m_Suballocations.end();
7077  ++it)
7078  {
7079  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7080  it->hAllocation->CanBecomeLost() &&
7081  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7082  {
7083  it = FreeSuballocation(it);
7084  ++lostAllocationCount;
7085  }
7086  }
7087  return lostAllocationCount;
7088 }
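// For intuition about the frame arithmetic behind CanBecomeLost()/MakeLost():
// an allocation may be sacrificed only after it has gone unused for more than
// frameInUseCount frames. Illustrative numbers (invented for this example):
//
//   GetLastUseFrameIndex() == 90, frameInUseCount == 2
//   currentFrameIndex 91 or 92: 90 + 2 < currentFrameIndex is false -> kept.
//   currentFrameIndex 93:       90 + 2 < 93 is true -> may become lost.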
7089 
7090 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7091 {
7092  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7093  it != m_Suballocations.end();
7094  ++it)
7095  {
7096  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7097  {
7098  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7099  {
7100  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7101  return VK_ERROR_VALIDATION_FAILED_EXT;
7102  }
7103  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7104  {
7105  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7106  return VK_ERROR_VALIDATION_FAILED_EXT;
7107  }
7108  }
7109  }
7110 
7111  return VK_SUCCESS;
7112 }
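// CheckCorruption() assumes every used range is bracketed by VMA_DEBUG_MARGIN
// bytes holding a known filler pattern, written when the allocation was made.
// A minimal sketch of such a validator; the pattern value and margin size are
// assumptions for illustration, not the library's actual constants:
//
//   #include <cstdint>
//
//   static const uint32_t kMagic  = 0x7F84E666u; // hypothetical filler value
//   static const uint64_t kMargin = 16;          // stands in for VMA_DEBUG_MARGIN
//
//   static bool ValidateMagicRegion(const void* pBlockData, uint64_t offset)
//   {
//       const uint32_t* p = reinterpret_cast<const uint32_t*>(
//           static_cast<const uint8_t*>(pBlockData) + offset);
//       for(uint64_t i = 0; i < kMargin / sizeof(uint32_t); ++i)
//           if(p[i] != kMagic)
//               return false; // something wrote past an allocation boundary
//       return true;
//   }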
7113 
7114 void VmaBlockMetadata_Generic::Alloc(
7115  const VmaAllocationRequest& request,
7116  VmaSuballocationType type,
7117  VkDeviceSize allocSize,
7118  bool upperAddress,
7119  VmaAllocation hAllocation)
7120 {
7121  VMA_ASSERT(!upperAddress);
7122  VMA_ASSERT(request.item != m_Suballocations.end());
7123  VmaSuballocation& suballoc = *request.item;
7124  // Given suballocation is a free block.
7125  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7126  // Given offset is inside this suballocation.
7127  VMA_ASSERT(request.offset >= suballoc.offset);
7128  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7129  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7130  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7131 
7132  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7133  // it to become used.
7134  UnregisterFreeSuballocation(request.item);
7135 
7136  suballoc.offset = request.offset;
7137  suballoc.size = allocSize;
7138  suballoc.type = type;
7139  suballoc.hAllocation = hAllocation;
7140 
7141  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7142  if(paddingEnd)
7143  {
7144  VmaSuballocation paddingSuballoc = {};
7145  paddingSuballoc.offset = request.offset + allocSize;
7146  paddingSuballoc.size = paddingEnd;
7147  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7148  VmaSuballocationList::iterator next = request.item;
7149  ++next;
7150  const VmaSuballocationList::iterator paddingEndItem =
7151  m_Suballocations.insert(next, paddingSuballoc);
7152  RegisterFreeSuballocation(paddingEndItem);
7153  }
7154 
7155  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7156  if(paddingBegin)
7157  {
7158  VmaSuballocation paddingSuballoc = {};
7159  paddingSuballoc.offset = request.offset - paddingBegin;
7160  paddingSuballoc.size = paddingBegin;
7161  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7162  const VmaSuballocationList::iterator paddingBeginItem =
7163  m_Suballocations.insert(request.item, paddingSuballoc);
7164  RegisterFreeSuballocation(paddingBeginItem);
7165  }
7166 
7167  // Update totals.
7168  m_FreeCount = m_FreeCount - 1;
7169  if(paddingBegin > 0)
7170  {
7171  ++m_FreeCount;
7172  }
7173  if(paddingEnd > 0)
7174  {
7175  ++m_FreeCount;
7176  }
7177  m_SumFreeSize -= allocSize;
7178 }
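// A worked example of the split performed above (numbers invented for
// illustration): a free range at offset 100 with size 300 receives an
// allocation of size 200 whose aligned request.offset is 128. Then:
//
//   paddingBegin = 128 - 100      = 28
//   paddingEnd   = 300 - 28 - 200 = 72
//
// so the single free range becomes [28 free][200 used][72 free], and
// m_FreeCount goes 1 -> 2 (one range consumed, two paddings inserted).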
7179 
7180 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7181 {
7182  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7183  suballocItem != m_Suballocations.end();
7184  ++suballocItem)
7185  {
7186  VmaSuballocation& suballoc = *suballocItem;
7187  if(suballoc.hAllocation == allocation)
7188  {
7189  FreeSuballocation(suballocItem);
7190  VMA_HEAVY_ASSERT(Validate());
7191  return;
7192  }
7193  }
7194  VMA_ASSERT(0 && "Not found!");
7195 }
7196 
7197 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7198 {
7199  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7200  suballocItem != m_Suballocations.end();
7201  ++suballocItem)
7202  {
7203  VmaSuballocation& suballoc = *suballocItem;
7204  if(suballoc.offset == offset)
7205  {
7206  FreeSuballocation(suballocItem);
7207  return;
7208  }
7209  }
7210  VMA_ASSERT(0 && "Not found!");
7211 }
7212 
7213 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7214 {
7215  VkDeviceSize lastSize = 0;
7216  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7217  {
7218  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7219 
7220  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7221  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7222  VMA_VALIDATE(it->size >= lastSize);
7223  lastSize = it->size;
7224  }
7225  return true;
7226 }
7227 
7228 bool VmaBlockMetadata_Generic::CheckAllocation(
7229  uint32_t currentFrameIndex,
7230  uint32_t frameInUseCount,
7231  VkDeviceSize bufferImageGranularity,
7232  VkDeviceSize allocSize,
7233  VkDeviceSize allocAlignment,
7234  VmaSuballocationType allocType,
7235  VmaSuballocationList::const_iterator suballocItem,
7236  bool canMakeOtherLost,
7237  VkDeviceSize* pOffset,
7238  size_t* itemsToMakeLostCount,
7239  VkDeviceSize* pSumFreeSize,
7240  VkDeviceSize* pSumItemSize) const
7241 {
7242  VMA_ASSERT(allocSize > 0);
7243  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7244  VMA_ASSERT(suballocItem != m_Suballocations.cend());
7245  VMA_ASSERT(pOffset != VMA_NULL);
7246 
7247  *itemsToMakeLostCount = 0;
7248  *pSumFreeSize = 0;
7249  *pSumItemSize = 0;
7250 
7251  if(canMakeOtherLost)
7252  {
7253  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7254  {
7255  *pSumFreeSize = suballocItem->size;
7256  }
7257  else
7258  {
7259  if(suballocItem->hAllocation->CanBecomeLost() &&
7260  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7261  {
7262  ++*itemsToMakeLostCount;
7263  *pSumItemSize = suballocItem->size;
7264  }
7265  else
7266  {
7267  return false;
7268  }
7269  }
7270 
7271  // Remaining size is too small for this request: Early return.
7272  if(GetSize() - suballocItem->offset < allocSize)
7273  {
7274  return false;
7275  }
7276 
7277  // Start from offset equal to beginning of this suballocation.
7278  *pOffset = suballocItem->offset;
7279 
7280  // Apply VMA_DEBUG_MARGIN at the beginning.
7281  if(VMA_DEBUG_MARGIN > 0)
7282  {
7283  *pOffset += VMA_DEBUG_MARGIN;
7284  }
7285 
7286  // Apply alignment.
7287  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7288 
7289  // Check previous suballocations for BufferImageGranularity conflicts.
7290  // Make bigger alignment if necessary.
7291  if(bufferImageGranularity > 1)
7292  {
7293  bool bufferImageGranularityConflict = false;
7294  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7295  while(prevSuballocItem != m_Suballocations.cbegin())
7296  {
7297  --prevSuballocItem;
7298  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7299  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7300  {
7301  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7302  {
7303  bufferImageGranularityConflict = true;
7304  break;
7305  }
7306  }
7307  else
7308  // Already on previous page.
7309  break;
7310  }
7311  if(bufferImageGranularityConflict)
7312  {
7313  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7314  }
7315  }
7316 
7317  // Now that we have final *pOffset, check if we are past suballocItem.
7318  // If yes, return false - this function should be called for another suballocItem as starting point.
7319  if(*pOffset >= suballocItem->offset + suballocItem->size)
7320  {
7321  return false;
7322  }
7323 
7324  // Calculate padding at the beginning based on current offset.
7325  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
7326 
7327  // Calculate required margin at the end.
7328  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7329 
7330  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
7331  // Another early return check.
7332  if(suballocItem->offset + totalSize > GetSize())
7333  {
7334  return false;
7335  }
7336 
7337  // Advance lastSuballocItem until desired size is reached.
7338  // Update itemsToMakeLostCount.
7339  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
7340  if(totalSize > suballocItem->size)
7341  {
7342  VkDeviceSize remainingSize = totalSize - suballocItem->size;
7343  while(remainingSize > 0)
7344  {
7345  ++lastSuballocItem;
7346  if(lastSuballocItem == m_Suballocations.cend())
7347  {
7348  return false;
7349  }
7350  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7351  {
7352  *pSumFreeSize += lastSuballocItem->size;
7353  }
7354  else
7355  {
7356  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
7357  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
7358  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7359  {
7360  ++*itemsToMakeLostCount;
7361  *pSumItemSize += lastSuballocItem->size;
7362  }
7363  else
7364  {
7365  return false;
7366  }
7367  }
7368  remainingSize = (lastSuballocItem->size < remainingSize) ?
7369  remainingSize - lastSuballocItem->size : 0;
7370  }
7371  }
7372 
7373  // Check next suballocations for BufferImageGranularity conflicts.
7374  // If conflict exists, we must mark more allocations lost or fail.
7375  if(bufferImageGranularity > 1)
7376  {
7377  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
7378  ++nextSuballocItem;
7379  while(nextSuballocItem != m_Suballocations.cend())
7380  {
7381  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7382  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7383  {
7384  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7385  {
7386  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
7387  if(nextSuballoc.hAllocation->CanBecomeLost() &&
7388  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7389  {
7390  ++*itemsToMakeLostCount;
7391  }
7392  else
7393  {
7394  return false;
7395  }
7396  }
7397  }
7398  else
7399  {
7400  // Already on next page.
7401  break;
7402  }
7403  ++nextSuballocItem;
7404  }
7405  }
7406  }
7407  else
7408  {
7409  const VmaSuballocation& suballoc = *suballocItem;
7410  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7411 
7412  *pSumFreeSize = suballoc.size;
7413 
7414  // Size of this suballocation is too small for this request: Early return.
7415  if(suballoc.size < allocSize)
7416  {
7417  return false;
7418  }
7419 
7420  // Start from offset equal to beginning of this suballocation.
7421  *pOffset = suballoc.offset;
7422 
7423  // Apply VMA_DEBUG_MARGIN at the beginning.
7424  if(VMA_DEBUG_MARGIN > 0)
7425  {
7426  *pOffset += VMA_DEBUG_MARGIN;
7427  }
7428 
7429  // Apply alignment.
7430  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7431 
7432  // Check previous suballocations for BufferImageGranularity conflicts.
7433  // Make bigger alignment if necessary.
7434  if(bufferImageGranularity > 1)
7435  {
7436  bool bufferImageGranularityConflict = false;
7437  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7438  while(prevSuballocItem != m_Suballocations.cbegin())
7439  {
7440  --prevSuballocItem;
7441  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7442  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7443  {
7444  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7445  {
7446  bufferImageGranularityConflict = true;
7447  break;
7448  }
7449  }
7450  else
7451  // Already on previous page.
7452  break;
7453  }
7454  if(bufferImageGranularityConflict)
7455  {
7456  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7457  }
7458  }
7459 
7460  // Calculate padding at the beginning based on current offset.
7461  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
7462 
7463  // Calculate required margin at the end.
7464  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7465 
7466  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
7467  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
7468  {
7469  return false;
7470  }
7471 
7472  // Check next suballocations for BufferImageGranularity conflicts.
7473  // If conflict exists, allocation cannot be made here.
7474  if(bufferImageGranularity > 1)
7475  {
7476  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7477  ++nextSuballocItem;
7478  while(nextSuballocItem != m_Suballocations.cend())
7479  {
7480  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7481  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7482  {
7483  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7484  {
7485  return false;
7486  }
7487  }
7488  else
7489  {
7490  // Already on next page.
7491  break;
7492  }
7493  ++nextSuballocItem;
7494  }
7495  }
7496  }
7497 
7498  // All tests passed: Success. pOffset is already filled.
7499  return true;
7500 }
7501 
7502 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7503 {
7504  VMA_ASSERT(item != m_Suballocations.end());
7505  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7506 
7507  VmaSuballocationList::iterator nextItem = item;
7508  ++nextItem;
7509  VMA_ASSERT(nextItem != m_Suballocations.end());
7510  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7511 
7512  item->size += nextItem->size;
7513  --m_FreeCount;
7514  m_Suballocations.erase(nextItem);
7515 }
7516 
7517 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7518 {
7519  // Change this suballocation to be marked as free.
7520  VmaSuballocation& suballoc = *suballocItem;
7521  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7522  suballoc.hAllocation = VK_NULL_HANDLE;
7523 
7524  // Update totals.
7525  ++m_FreeCount;
7526  m_SumFreeSize += suballoc.size;
7527 
7528  // Merge with previous and/or next suballocation if it's also free.
7529  bool mergeWithNext = false;
7530  bool mergeWithPrev = false;
7531 
7532  VmaSuballocationList::iterator nextItem = suballocItem;
7533  ++nextItem;
7534  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7535  {
7536  mergeWithNext = true;
7537  }
7538 
7539  VmaSuballocationList::iterator prevItem = suballocItem;
7540  if(suballocItem != m_Suballocations.begin())
7541  {
7542  --prevItem;
7543  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7544  {
7545  mergeWithPrev = true;
7546  }
7547  }
7548 
7549  if(mergeWithNext)
7550  {
7551  UnregisterFreeSuballocation(nextItem);
7552  MergeFreeWithNext(suballocItem);
7553  }
7554 
7555  if(mergeWithPrev)
7556  {
7557  UnregisterFreeSuballocation(prevItem);
7558  MergeFreeWithNext(prevItem);
7559  RegisterFreeSuballocation(prevItem);
7560  return prevItem;
7561  }
7562  else
7563  {
7564  RegisterFreeSuballocation(suballocItem);
7565  return suballocItem;
7566  }
7567 }
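// FreeSuballocation() has four outcomes depending on the freed range's
// neighbors (illustrative, where FREED is the range being released):
//
//   [used][FREED][used] -> one new free range, registered as-is.
//   [used][FREED][free] -> merged with the next range into one larger one.
//   [free][FREED][used] -> merged into the previous range, which is
//                          re-registered because its size changed.
//   [free][FREED][free] -> both merges apply; three ranges collapse into one.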
7568 
7569 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7570 {
7571  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7572  VMA_ASSERT(item->size > 0);
7573 
7574  // You may want to enable this validation at the beginning or at the end of
7575  // this function, depending on what you want to check.
7576  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7577 
7578  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7579  {
7580  if(m_FreeSuballocationsBySize.empty())
7581  {
7582  m_FreeSuballocationsBySize.push_back(item);
7583  }
7584  else
7585  {
7586  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7587  }
7588  }
7589 
7590  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7591 }
7592 
7593 
7594 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7595 {
7596  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7597  VMA_ASSERT(item->size > 0);
7598 
7599  // You may want to enable this validation at the beginning or at the end of
7600  // this function, depending on what you want to check.
7601  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7602 
7603  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7604  {
7605  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7606  m_FreeSuballocationsBySize.data(),
7607  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7608  item,
7609  VmaSuballocationItemSizeLess());
7610  for(size_t index = it - m_FreeSuballocationsBySize.data();
7611  index < m_FreeSuballocationsBySize.size();
7612  ++index)
7613  {
7614  if(m_FreeSuballocationsBySize[index] == item)
7615  {
7616  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7617  return;
7618  }
7619  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7620  }
7621  VMA_ASSERT(0 && "Not found.");
7622  }
7623 
7624  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7625 }
7626 
7627 ////////////////////////////////////////////////////////////////////////////////
7628 // class VmaBlockMetadata_Linear
7629 
7630 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
7631  VmaBlockMetadata(hAllocator),
7632  m_SumFreeSize(0),
7633  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7634  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7635  m_1stVectorIndex(0),
7636  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7637  m_1stNullItemsBeginCount(0),
7638  m_1stNullItemsMiddleCount(0),
7639  m_2ndNullItemsCount(0)
7640 {
7641 }
7642 
7643 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
7644 {
7645 }
7646 
7647 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7648 {
7649  VmaBlockMetadata::Init(size);
7650  m_SumFreeSize = size;
7651 }
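// The linear metadata keeps two suballocation vectors whose meaning depends
// on m_2ndVectorMode. A sketch of the three layouts within one block (1st
// grows toward higher offsets; in double-stack mode 2nd grows down from the
// end of the block):
//
//   SECOND_VECTOR_EMPTY:        | free | 1st --> | free ................ |
//   SECOND_VECTOR_RING_BUFFER:  | 2nd --> | free | 1st --> |             |
//   SECOND_VECTOR_DOUBLE_STACK: | 1st --> | free .......... | <-- 2nd    |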
7652 
7653 bool VmaBlockMetadata_Linear::Validate() const
7654 {
7655  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7656  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7657 
7658  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7659  VMA_VALIDATE(!suballocations1st.empty() ||
7660  suballocations2nd.empty() ||
7661  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7662 
7663  if(!suballocations1st.empty())
7664  {
7665  // Null items at the beginning should be counted in m_1stNullItemsBeginCount.
7666  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
7667  // A null item at the end should have been removed by pop_back().
7668  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
7669  }
7670  if(!suballocations2nd.empty())
7671  {
7672  // A null item at the end should have been removed by pop_back().
7673  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
7674  }
7675 
7676  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7677  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7678 
7679  VkDeviceSize sumUsedSize = 0;
7680  const size_t suballoc1stCount = suballocations1st.size();
7681  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7682 
7683  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7684  {
7685  const size_t suballoc2ndCount = suballocations2nd.size();
7686  size_t nullItem2ndCount = 0;
7687  for(size_t i = 0; i < suballoc2ndCount; ++i)
7688  {
7689  const VmaSuballocation& suballoc = suballocations2nd[i];
7690  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7691 
7692  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7693  VMA_VALIDATE(suballoc.offset >= offset);
7694 
7695  if(!currFree)
7696  {
7697  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7698  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7699  sumUsedSize += suballoc.size;
7700  }
7701  else
7702  {
7703  ++nullItem2ndCount;
7704  }
7705 
7706  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7707  }
7708 
7709  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7710  }
7711 
7712  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7713  {
7714  const VmaSuballocation& suballoc = suballocations1st[i];
7715  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7716  suballoc.hAllocation == VK_NULL_HANDLE);
7717  }
7718 
7719  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7720 
7721  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7722  {
7723  const VmaSuballocation& suballoc = suballocations1st[i];
7724  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7725 
7726  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7727  VMA_VALIDATE(suballoc.offset >= offset);
7728  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7729 
7730  if(!currFree)
7731  {
7732  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7733  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7734  sumUsedSize += suballoc.size;
7735  }
7736  else
7737  {
7738  ++nullItem1stCount;
7739  }
7740 
7741  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7742  }
7743  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7744 
7745  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7746  {
7747  const size_t suballoc2ndCount = suballocations2nd.size();
7748  size_t nullItem2ndCount = 0;
7749  for(size_t i = suballoc2ndCount; i--; )
7750  {
7751  const VmaSuballocation& suballoc = suballocations2nd[i];
7752  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7753 
7754  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7755  VMA_VALIDATE(suballoc.offset >= offset);
7756 
7757  if(!currFree)
7758  {
7759  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7760  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7761  sumUsedSize += suballoc.size;
7762  }
7763  else
7764  {
7765  ++nullItem2ndCount;
7766  }
7767 
7768  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7769  }
7770 
7771  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7772  }
7773 
7774  VMA_VALIDATE(offset <= GetSize());
7775  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7776 
7777  return true;
7778 }
7779 
7780 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7781 {
7782  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7783  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7784 }
7785 
7786 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7787 {
7788  const VkDeviceSize size = GetSize();
7789 
7790  /*
7791  We don't consider gaps inside allocation vectors with freed allocations because
7792  they are not suitable for reuse in a linear allocator. We consider only space that
7793  is available for new allocations.
7794  */
7795  if(IsEmpty())
7796  {
7797  return size;
7798  }
7799 
7800  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7801 
7802  switch(m_2ndVectorMode)
7803  {
7804  case SECOND_VECTOR_EMPTY:
7805  /*
7806  Available space is after end of 1st, as well as before beginning of 1st (which
7807  would make it a ring buffer).
7808  */
7809  {
7810  const size_t suballocations1stCount = suballocations1st.size();
7811  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
7812  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
7813  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
7814  return VMA_MAX(
7815  firstSuballoc.offset,
7816  size - (lastSuballoc.offset + lastSuballoc.size));
7817  }
7818  break;
7819 
7820  case SECOND_VECTOR_RING_BUFFER:
7821  /*
7822  Available space is only between end of 2nd and beginning of 1st.
7823  */
7824  {
7825  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7826  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
7827  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
7828  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
7829  }
7830  break;
7831 
7832  case SECOND_VECTOR_DOUBLE_STACK:
7833  /*
7834  Available space is only between end of 1st and top of 2nd.
7835  */
7836  {
7837  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7838  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
7839  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
7840  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
7841  }
7842  break;
7843 
7844  default:
7845  VMA_ASSERT(0);
7846  return 0;
7847  }
7848 }
7849 
7850 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7851 {
7852  const VkDeviceSize size = GetSize();
7853  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7854  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7855  const size_t suballoc1stCount = suballocations1st.size();
7856  const size_t suballoc2ndCount = suballocations2nd.size();
7857 
7858  outInfo.blockCount = 1;
7859  outInfo.allocationCount = (uint32_t)GetAllocationCount();
7860  outInfo.unusedRangeCount = 0;
7861  outInfo.usedBytes = 0;
7862  outInfo.allocationSizeMin = UINT64_MAX;
7863  outInfo.allocationSizeMax = 0;
7864  outInfo.unusedRangeSizeMin = UINT64_MAX;
7865  outInfo.unusedRangeSizeMax = 0;
7866 
7867  VkDeviceSize lastOffset = 0;
7868 
7869  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7870  {
7871  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7872  size_t nextAlloc2ndIndex = 0;
7873  while(lastOffset < freeSpace2ndTo1stEnd)
7874  {
7875  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7876  while(nextAlloc2ndIndex < suballoc2ndCount &&
7877  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7878  {
7879  ++nextAlloc2ndIndex;
7880  }
7881 
7882  // Found non-null allocation.
7883  if(nextAlloc2ndIndex < suballoc2ndCount)
7884  {
7885  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7886 
7887  // 1. Process free space before this allocation.
7888  if(lastOffset < suballoc.offset)
7889  {
7890  // There is free space from lastOffset to suballoc.offset.
7891  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7892  ++outInfo.unusedRangeCount;
7893  outInfo.unusedBytes += unusedRangeSize;
7894  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7895  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7896  }
7897 
7898  // 2. Process this allocation.
7899  // There is allocation with suballoc.offset, suballoc.size.
7900  outInfo.usedBytes += suballoc.size;
7901  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7902  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7903 
7904  // 3. Prepare for next iteration.
7905  lastOffset = suballoc.offset + suballoc.size;
7906  ++nextAlloc2ndIndex;
7907  }
7908  // We are at the end.
7909  else
7910  {
7911  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7912  if(lastOffset < freeSpace2ndTo1stEnd)
7913  {
7914  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7915  ++outInfo.unusedRangeCount;
7916  outInfo.unusedBytes += unusedRangeSize;
7917  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7918  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7919  }
7920 
7921  // End of loop.
7922  lastOffset = freeSpace2ndTo1stEnd;
7923  }
7924  }
7925  }
7926 
7927  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7928  const VkDeviceSize freeSpace1stTo2ndEnd =
7929  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7930  while(lastOffset < freeSpace1stTo2ndEnd)
7931  {
7932  // Find next non-null allocation or move nextAlloc1stIndex to the end.
7933  while(nextAlloc1stIndex < suballoc1stCount &&
7934  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7935  {
7936  ++nextAlloc1stIndex;
7937  }
7938 
7939  // Found non-null allocation.
7940  if(nextAlloc1stIndex < suballoc1stCount)
7941  {
7942  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7943 
7944  // 1. Process free space before this allocation.
7945  if(lastOffset < suballoc.offset)
7946  {
7947  // There is free space from lastOffset to suballoc.offset.
7948  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7949  ++outInfo.unusedRangeCount;
7950  outInfo.unusedBytes += unusedRangeSize;
7951  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7952  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7953  }
7954 
7955  // 2. Process this allocation.
7956  // There is allocation with suballoc.offset, suballoc.size.
7957  outInfo.usedBytes += suballoc.size;
7958  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7959  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7960 
7961  // 3. Prepare for next iteration.
7962  lastOffset = suballoc.offset + suballoc.size;
7963  ++nextAlloc1stIndex;
7964  }
7965  // We are at the end.
7966  else
7967  {
7968  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7969  if(lastOffset < freeSpace1stTo2ndEnd)
7970  {
7971  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7972  ++outInfo.unusedRangeCount;
7973  outInfo.unusedBytes += unusedRangeSize;
7974  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7975  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7976  }
7977 
7978  // End of loop.
7979  lastOffset = freeSpace1stTo2ndEnd;
7980  }
7981  }
7982 
7983  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7984  {
7985  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7986  while(lastOffset < size)
7987  {
7988  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7989  while(nextAlloc2ndIndex != SIZE_MAX &&
7990  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7991  {
7992  --nextAlloc2ndIndex;
7993  }
7994 
7995  // Found non-null allocation.
7996  if(nextAlloc2ndIndex != SIZE_MAX)
7997  {
7998  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7999 
8000  // 1. Process free space before this allocation.
8001  if(lastOffset < suballoc.offset)
8002  {
8003  // There is free space from lastOffset to suballoc.offset.
8004  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8005  ++outInfo.unusedRangeCount;
8006  outInfo.unusedBytes += unusedRangeSize;
8007  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8008  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8009  }
8010 
8011  // 2. Process this allocation.
8012  // There is allocation with suballoc.offset, suballoc.size.
8013  outInfo.usedBytes += suballoc.size;
8014  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8015  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8016 
8017  // 3. Prepare for next iteration.
8018  lastOffset = suballoc.offset + suballoc.size;
8019  --nextAlloc2ndIndex;
8020  }
8021  // We are at the end.
8022  else
8023  {
8024  // There is free space from lastOffset to size.
8025  if(lastOffset < size)
8026  {
8027  const VkDeviceSize unusedRangeSize = size - lastOffset;
8028  ++outInfo.unusedRangeCount;
8029  outInfo.unusedBytes += unusedRangeSize;
8030  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8031  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8032  }
8033 
8034  // End of loop.
8035  lastOffset = size;
8036  }
8037  }
8038  }
8039 
8040  outInfo.unusedBytes = size - outInfo.usedBytes;
8041 }
8042 
8043 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8044 {
8045  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8046  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8047  const VkDeviceSize size = GetSize();
8048  const size_t suballoc1stCount = suballocations1st.size();
8049  const size_t suballoc2ndCount = suballocations2nd.size();
8050 
8051  inoutStats.size += size;
8052 
8053  VkDeviceSize lastOffset = 0;
8054 
8055  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8056  {
8057  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8058  size_t nextAlloc2ndIndex = 0;
8059  while(lastOffset < freeSpace2ndTo1stEnd)
8060  {
8061  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8062  while(nextAlloc2ndIndex < suballoc2ndCount &&
8063  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8064  {
8065  ++nextAlloc2ndIndex;
8066  }
8067 
8068  // Found non-null allocation.
8069  if(nextAlloc2ndIndex < suballoc2ndCount)
8070  {
8071  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8072 
8073  // 1. Process free space before this allocation.
8074  if(lastOffset < suballoc.offset)
8075  {
8076  // There is free space from lastOffset to suballoc.offset.
8077  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8078  inoutStats.unusedSize += unusedRangeSize;
8079  ++inoutStats.unusedRangeCount;
8080  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8081  }
8082 
8083  // 2. Process this allocation.
8084  // There is allocation with suballoc.offset, suballoc.size.
8085  ++inoutStats.allocationCount;
8086 
8087  // 3. Prepare for next iteration.
8088  lastOffset = suballoc.offset + suballoc.size;
8089  ++nextAlloc2ndIndex;
8090  }
8091  // We are at the end.
8092  else
8093  {
8094  if(lastOffset < freeSpace2ndTo1stEnd)
8095  {
8096  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8097  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8098  inoutStats.unusedSize += unusedRangeSize;
8099  ++inoutStats.unusedRangeCount;
8100  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8101  }
8102 
8103  // End of loop.
8104  lastOffset = freeSpace2ndTo1stEnd;
8105  }
8106  }
8107  }
8108 
8109  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8110  const VkDeviceSize freeSpace1stTo2ndEnd =
8111  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8112  while(lastOffset < freeSpace1stTo2ndEnd)
8113  {
8114  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8115  while(nextAlloc1stIndex < suballoc1stCount &&
8116  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8117  {
8118  ++nextAlloc1stIndex;
8119  }
8120 
8121  // Found non-null allocation.
8122  if(nextAlloc1stIndex < suballoc1stCount)
8123  {
8124  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8125 
8126  // 1. Process free space before this allocation.
8127  if(lastOffset < suballoc.offset)
8128  {
8129  // There is free space from lastOffset to suballoc.offset.
8130  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8131  inoutStats.unusedSize += unusedRangeSize;
8132  ++inoutStats.unusedRangeCount;
8133  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8134  }
8135 
8136  // 2. Process this allocation.
8137  // There is allocation with suballoc.offset, suballoc.size.
8138  ++inoutStats.allocationCount;
8139 
8140  // 3. Prepare for next iteration.
8141  lastOffset = suballoc.offset + suballoc.size;
8142  ++nextAlloc1stIndex;
8143  }
8144  // We are at the end.
8145  else
8146  {
8147  if(lastOffset < freeSpace1stTo2ndEnd)
8148  {
8149  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8150  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8151  inoutStats.unusedSize += unusedRangeSize;
8152  ++inoutStats.unusedRangeCount;
8153  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8154  }
8155 
8156  // End of loop.
8157  lastOffset = freeSpace1stTo2ndEnd;
8158  }
8159  }
8160 
8161  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8162  {
8163  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8164  while(lastOffset < size)
8165  {
8166  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8167  while(nextAlloc2ndIndex != SIZE_MAX &&
8168  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8169  {
8170  --nextAlloc2ndIndex;
8171  }
8172 
8173  // Found non-null allocation.
8174  if(nextAlloc2ndIndex != SIZE_MAX)
8175  {
8176  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8177 
8178  // 1. Process free space before this allocation.
8179  if(lastOffset < suballoc.offset)
8180  {
8181  // There is free space from lastOffset to suballoc.offset.
8182  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8183  inoutStats.unusedSize += unusedRangeSize;
8184  ++inoutStats.unusedRangeCount;
8185  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8186  }
8187 
8188  // 2. Process this allocation.
8189  // There is allocation with suballoc.offset, suballoc.size.
8190  ++inoutStats.allocationCount;
8191 
8192  // 3. Prepare for next iteration.
8193  lastOffset = suballoc.offset + suballoc.size;
8194  --nextAlloc2ndIndex;
8195  }
8196  // We are at the end.
8197  else
8198  {
8199  if(lastOffset < size)
8200  {
8201  // There is free space from lastOffset to size.
8202  const VkDeviceSize unusedRangeSize = size - lastOffset;
8203  inoutStats.unusedSize += unusedRangeSize;
8204  ++inoutStats.unusedRangeCount;
8205  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8206  }
8207 
8208  // End of loop.
8209  lastOffset = size;
8210  }
8211  }
8212  }
8213 }
8214 
8215 #if VMA_STATS_STRING_ENABLED
8216 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8217 {
8218  const VkDeviceSize size = GetSize();
8219  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8220  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8221  const size_t suballoc1stCount = suballocations1st.size();
8222  const size_t suballoc2ndCount = suballocations2nd.size();
8223 
8224  // FIRST PASS
8225 
8226  size_t unusedRangeCount = 0;
8227  VkDeviceSize usedBytes = 0;
8228 
8229  VkDeviceSize lastOffset = 0;
8230 
8231  size_t alloc2ndCount = 0;
8232  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8233  {
8234  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8235  size_t nextAlloc2ndIndex = 0;
8236  while(lastOffset < freeSpace2ndTo1stEnd)
8237  {
8238  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8239  while(nextAlloc2ndIndex < suballoc2ndCount &&
8240  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8241  {
8242  ++nextAlloc2ndIndex;
8243  }
8244 
8245  // Found non-null allocation.
8246  if(nextAlloc2ndIndex < suballoc2ndCount)
8247  {
8248  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8249 
8250  // 1. Process free space before this allocation.
8251  if(lastOffset < suballoc.offset)
8252  {
8253  // There is free space from lastOffset to suballoc.offset.
8254  ++unusedRangeCount;
8255  }
8256 
8257  // 2. Process this allocation.
8258  // There is allocation with suballoc.offset, suballoc.size.
8259  ++alloc2ndCount;
8260  usedBytes += suballoc.size;
8261 
8262  // 3. Prepare for next iteration.
8263  lastOffset = suballoc.offset + suballoc.size;
8264  ++nextAlloc2ndIndex;
8265  }
8266  // We are at the end.
8267  else
8268  {
8269  if(lastOffset < freeSpace2ndTo1stEnd)
8270  {
8271  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8272  ++unusedRangeCount;
8273  }
8274 
8275  // End of loop.
8276  lastOffset = freeSpace2ndTo1stEnd;
8277  }
8278  }
8279  }
8280 
8281  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8282  size_t alloc1stCount = 0;
8283  const VkDeviceSize freeSpace1stTo2ndEnd =
8284  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8285  while(lastOffset < freeSpace1stTo2ndEnd)
8286  {
8287  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8288  while(nextAlloc1stIndex < suballoc1stCount &&
8289  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8290  {
8291  ++nextAlloc1stIndex;
8292  }
8293 
8294  // Found non-null allocation.
8295  if(nextAlloc1stIndex < suballoc1stCount)
8296  {
8297  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8298 
8299  // 1. Process free space before this allocation.
8300  if(lastOffset < suballoc.offset)
8301  {
8302  // There is free space from lastOffset to suballoc.offset.
8303  ++unusedRangeCount;
8304  }
8305 
8306  // 2. Process this allocation.
8307  // There is allocation with suballoc.offset, suballoc.size.
8308  ++alloc1stCount;
8309  usedBytes += suballoc.size;
8310 
8311  // 3. Prepare for next iteration.
8312  lastOffset = suballoc.offset + suballoc.size;
8313  ++nextAlloc1stIndex;
8314  }
8315  // We are at the end.
8316  else
8317  {
 8318  if(lastOffset < freeSpace1stTo2ndEnd)
8319  {
8320  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8321  ++unusedRangeCount;
8322  }
8323 
8324  // End of loop.
8325  lastOffset = freeSpace1stTo2ndEnd;
8326  }
8327  }
8328 
8329  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8330  {
8331  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8332  while(lastOffset < size)
8333  {
8334  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8335  while(nextAlloc2ndIndex != SIZE_MAX &&
8336  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8337  {
8338  --nextAlloc2ndIndex;
8339  }
8340 
8341  // Found non-null allocation.
8342  if(nextAlloc2ndIndex != SIZE_MAX)
8343  {
8344  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8345 
8346  // 1. Process free space before this allocation.
8347  if(lastOffset < suballoc.offset)
8348  {
8349  // There is free space from lastOffset to suballoc.offset.
8350  ++unusedRangeCount;
8351  }
8352 
8353  // 2. Process this allocation.
8354  // There is allocation with suballoc.offset, suballoc.size.
8355  ++alloc2ndCount;
8356  usedBytes += suballoc.size;
8357 
8358  // 3. Prepare for next iteration.
8359  lastOffset = suballoc.offset + suballoc.size;
8360  --nextAlloc2ndIndex;
8361  }
8362  // We are at the end.
8363  else
8364  {
8365  if(lastOffset < size)
8366  {
8367  // There is free space from lastOffset to size.
8368  ++unusedRangeCount;
8369  }
8370 
8371  // End of loop.
8372  lastOffset = size;
8373  }
8374  }
8375  }
8376 
8377  const VkDeviceSize unusedBytes = size - usedBytes;
8378  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8379 
8380  // SECOND PASS
8381  lastOffset = 0;
8382 
8383  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8384  {
8385  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8386  size_t nextAlloc2ndIndex = 0;
8387  while(lastOffset < freeSpace2ndTo1stEnd)
8388  {
8389  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8390  while(nextAlloc2ndIndex < suballoc2ndCount &&
8391  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8392  {
8393  ++nextAlloc2ndIndex;
8394  }
8395 
8396  // Found non-null allocation.
8397  if(nextAlloc2ndIndex < suballoc2ndCount)
8398  {
8399  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8400 
8401  // 1. Process free space before this allocation.
8402  if(lastOffset < suballoc.offset)
8403  {
8404  // There is free space from lastOffset to suballoc.offset.
8405  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8406  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8407  }
8408 
8409  // 2. Process this allocation.
8410  // There is allocation with suballoc.offset, suballoc.size.
8411  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8412 
8413  // 3. Prepare for next iteration.
8414  lastOffset = suballoc.offset + suballoc.size;
8415  ++nextAlloc2ndIndex;
8416  }
8417  // We are at the end.
8418  else
8419  {
8420  if(lastOffset < freeSpace2ndTo1stEnd)
8421  {
8422  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8423  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8424  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8425  }
8426 
8427  // End of loop.
8428  lastOffset = freeSpace2ndTo1stEnd;
8429  }
8430  }
8431  }
8432 
8433  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8434  while(lastOffset < freeSpace1stTo2ndEnd)
8435  {
 8436  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8437  while(nextAlloc1stIndex < suballoc1stCount &&
8438  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8439  {
8440  ++nextAlloc1stIndex;
8441  }
8442 
8443  // Found non-null allocation.
8444  if(nextAlloc1stIndex < suballoc1stCount)
8445  {
8446  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8447 
8448  // 1. Process free space before this allocation.
8449  if(lastOffset < suballoc.offset)
8450  {
8451  // There is free space from lastOffset to suballoc.offset.
8452  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8453  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8454  }
8455 
8456  // 2. Process this allocation.
8457  // There is allocation with suballoc.offset, suballoc.size.
8458  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8459 
8460  // 3. Prepare for next iteration.
8461  lastOffset = suballoc.offset + suballoc.size;
8462  ++nextAlloc1stIndex;
8463  }
8464  // We are at the end.
8465  else
8466  {
8467  if(lastOffset < freeSpace1stTo2ndEnd)
8468  {
8469  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8470  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8471  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8472  }
8473 
8474  // End of loop.
8475  lastOffset = freeSpace1stTo2ndEnd;
8476  }
8477  }
8478 
8479  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8480  {
8481  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8482  while(lastOffset < size)
8483  {
8484  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8485  while(nextAlloc2ndIndex != SIZE_MAX &&
8486  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8487  {
8488  --nextAlloc2ndIndex;
8489  }
8490 
8491  // Found non-null allocation.
8492  if(nextAlloc2ndIndex != SIZE_MAX)
8493  {
8494  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8495 
8496  // 1. Process free space before this allocation.
8497  if(lastOffset < suballoc.offset)
8498  {
8499  // There is free space from lastOffset to suballoc.offset.
8500  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8501  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8502  }
8503 
8504  // 2. Process this allocation.
8505  // There is allocation with suballoc.offset, suballoc.size.
8506  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8507 
8508  // 3. Prepare for next iteration.
8509  lastOffset = suballoc.offset + suballoc.size;
8510  --nextAlloc2ndIndex;
8511  }
8512  // We are at the end.
8513  else
8514  {
8515  if(lastOffset < size)
8516  {
8517  // There is free space from lastOffset to size.
8518  const VkDeviceSize unusedRangeSize = size - lastOffset;
8519  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8520  }
8521 
8522  // End of loop.
8523  lastOffset = size;
8524  }
8525  }
8526  }
8527 
8528  PrintDetailedMap_End(json);
8529 }
8530 #endif // #if VMA_STATS_STRING_ENABLED
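// The two passes above deliberately share one traversal shape: the first pass
// only counts, so that PrintDetailedMap_Begin() can be called with final
// totals before any range is emitted; the second pass writes one JSON entry
// per range. For example (hypothetical contents), a block of size 100 holding
// a single allocation at offset 30 with size 20 produces:
//
//     unused [0, 30), allocation [30, 50), unused [50, 100)
//     => allocationCount == 1, unusedRangeCount == 2,
//        usedBytes == 20, unusedBytes == 80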
8531 
8532 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8533  uint32_t currentFrameIndex,
8534  uint32_t frameInUseCount,
8535  VkDeviceSize bufferImageGranularity,
8536  VkDeviceSize allocSize,
8537  VkDeviceSize allocAlignment,
8538  bool upperAddress,
8539  VmaSuballocationType allocType,
8540  bool canMakeOtherLost,
8541  uint32_t strategy,
8542  VmaAllocationRequest* pAllocationRequest)
8543 {
8544  VMA_ASSERT(allocSize > 0);
8545  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8546  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8547  VMA_HEAVY_ASSERT(Validate());
8548 
8549  const VkDeviceSize size = GetSize();
8550  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8551  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8552 
8553  if(upperAddress)
8554  {
8555  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8556  {
8557  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8558  return false;
8559  }
8560 
8561  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8562  if(allocSize > size)
8563  {
8564  return false;
8565  }
8566  VkDeviceSize resultBaseOffset = size - allocSize;
8567  if(!suballocations2nd.empty())
8568  {
8569  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8570  resultBaseOffset = lastSuballoc.offset - allocSize;
8571  if(allocSize > lastSuballoc.offset)
8572  {
8573  return false;
8574  }
8575  }
8576 
8577  // Start from offset equal to end of free space.
8578  VkDeviceSize resultOffset = resultBaseOffset;
8579 
8580  // Apply VMA_DEBUG_MARGIN at the end.
8581  if(VMA_DEBUG_MARGIN > 0)
8582  {
8583  if(resultOffset < VMA_DEBUG_MARGIN)
8584  {
8585  return false;
8586  }
8587  resultOffset -= VMA_DEBUG_MARGIN;
8588  }
8589 
8590  // Apply alignment.
8591  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8592 
8593  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8594  // Make bigger alignment if necessary.
8595  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8596  {
8597  bool bufferImageGranularityConflict = false;
8598  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8599  {
8600  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8601  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8602  {
8603  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8604  {
8605  bufferImageGranularityConflict = true;
8606  break;
8607  }
8608  }
8609  else
8610  // Already on previous page.
8611  break;
8612  }
8613  if(bufferImageGranularityConflict)
8614  {
8615  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8616  }
8617  }
8618 
8619  // There is enough free space.
8620  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8621  suballocations1st.back().offset + suballocations1st.back().size :
8622  0;
8623  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8624  {
8625  // Check previous suballocations for BufferImageGranularity conflicts.
8626  // If conflict exists, allocation cannot be made here.
8627  if(bufferImageGranularity > 1)
8628  {
8629  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8630  {
8631  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8632  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8633  {
8634  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8635  {
8636  return false;
8637  }
8638  }
8639  else
8640  {
8641  // Already on next page.
8642  break;
8643  }
8644  }
8645  }
8646 
8647  // All tests passed: Success.
8648  pAllocationRequest->offset = resultOffset;
8649  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8650  pAllocationRequest->sumItemSize = 0;
8651  // pAllocationRequest->item unused.
8652  pAllocationRequest->itemsToMakeLostCount = 0;
8653  return true;
8654  }
8655  }
8656  else // !upperAddress
8657  {
8658  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8659  {
8660  // Try to allocate at the end of 1st vector.
8661 
8662  VkDeviceSize resultBaseOffset = 0;
8663  if(!suballocations1st.empty())
8664  {
8665  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8666  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8667  }
8668 
8669  // Start from offset equal to beginning of free space.
8670  VkDeviceSize resultOffset = resultBaseOffset;
8671 
8672  // Apply VMA_DEBUG_MARGIN at the beginning.
8673  if(VMA_DEBUG_MARGIN > 0)
8674  {
8675  resultOffset += VMA_DEBUG_MARGIN;
8676  }
8677 
8678  // Apply alignment.
8679  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8680 
8681  // Check previous suballocations for BufferImageGranularity conflicts.
8682  // Make bigger alignment if necessary.
8683  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8684  {
8685  bool bufferImageGranularityConflict = false;
8686  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8687  {
8688  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8689  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8690  {
8691  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8692  {
8693  bufferImageGranularityConflict = true;
8694  break;
8695  }
8696  }
8697  else
8698  // Already on previous page.
8699  break;
8700  }
8701  if(bufferImageGranularityConflict)
8702  {
8703  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8704  }
8705  }
8706 
8707  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8708  suballocations2nd.back().offset : size;
8709 
8710  // There is enough free space at the end after alignment.
8711  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8712  {
8713  // Check next suballocations for BufferImageGranularity conflicts.
8714  // If conflict exists, allocation cannot be made here.
8715  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8716  {
8717  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8718  {
8719  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8720  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8721  {
8722  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8723  {
8724  return false;
8725  }
8726  }
8727  else
8728  {
8729  // Already on previous page.
8730  break;
8731  }
8732  }
8733  }
8734 
8735  // All tests passed: Success.
8736  pAllocationRequest->offset = resultOffset;
8737  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8738  pAllocationRequest->sumItemSize = 0;
8739  // pAllocationRequest->item unused.
8740  pAllocationRequest->itemsToMakeLostCount = 0;
8741  return true;
8742  }
8743  }
8744 
8745  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
8746  // beginning of 1st vector as the end of free space.
8747  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8748  {
8749  VMA_ASSERT(!suballocations1st.empty());
8750 
8751  VkDeviceSize resultBaseOffset = 0;
8752  if(!suballocations2nd.empty())
8753  {
8754  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8755  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8756  }
8757 
8758  // Start from offset equal to beginning of free space.
8759  VkDeviceSize resultOffset = resultBaseOffset;
8760 
8761  // Apply VMA_DEBUG_MARGIN at the beginning.
8762  if(VMA_DEBUG_MARGIN > 0)
8763  {
8764  resultOffset += VMA_DEBUG_MARGIN;
8765  }
8766 
8767  // Apply alignment.
8768  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8769 
8770  // Check previous suballocations for BufferImageGranularity conflicts.
8771  // Make bigger alignment if necessary.
8772  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8773  {
8774  bool bufferImageGranularityConflict = false;
8775  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8776  {
8777  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8778  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8779  {
8780  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8781  {
8782  bufferImageGranularityConflict = true;
8783  break;
8784  }
8785  }
8786  else
8787  // Already on previous page.
8788  break;
8789  }
8790  if(bufferImageGranularityConflict)
8791  {
8792  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8793  }
8794  }
8795 
8796  pAllocationRequest->itemsToMakeLostCount = 0;
8797  pAllocationRequest->sumItemSize = 0;
8798  size_t index1st = m_1stNullItemsBeginCount;
8799 
8800  if(canMakeOtherLost)
8801  {
8802  while(index1st < suballocations1st.size() &&
8803  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8804  {
8805  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8806  const VmaSuballocation& suballoc = suballocations1st[index1st];
8807  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8808  {
8809  // No problem.
8810  }
8811  else
8812  {
8813  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8814  if(suballoc.hAllocation->CanBecomeLost() &&
8815  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8816  {
8817  ++pAllocationRequest->itemsToMakeLostCount;
8818  pAllocationRequest->sumItemSize += suballoc.size;
8819  }
8820  else
8821  {
8822  return false;
8823  }
8824  }
8825  ++index1st;
8826  }
8827 
8828  // Check next suballocations for BufferImageGranularity conflicts.
8829  // If conflict exists, we must mark more allocations lost or fail.
8830  if(bufferImageGranularity > 1)
8831  {
8832  while(index1st < suballocations1st.size())
8833  {
8834  const VmaSuballocation& suballoc = suballocations1st[index1st];
8835  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
8836  {
8837  if(suballoc.hAllocation != VK_NULL_HANDLE)
8838  {
8839  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
8840  if(suballoc.hAllocation->CanBecomeLost() &&
8841  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8842  {
8843  ++pAllocationRequest->itemsToMakeLostCount;
8844  pAllocationRequest->sumItemSize += suballoc.size;
8845  }
8846  else
8847  {
8848  return false;
8849  }
8850  }
8851  }
8852  else
8853  {
8854  // Already on next page.
8855  break;
8856  }
8857  ++index1st;
8858  }
8859  }
8860  }
8861 
8862  // There is enough free space at the end after alignment.
8863  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
8864  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
8865  {
8866  // Check next suballocations for BufferImageGranularity conflicts.
8867  // If conflict exists, allocation cannot be made here.
8868  if(bufferImageGranularity > 1)
8869  {
8870  for(size_t nextSuballocIndex = index1st;
8871  nextSuballocIndex < suballocations1st.size();
8872  nextSuballocIndex++)
8873  {
8874  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
8875  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8876  {
8877  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8878  {
8879  return false;
8880  }
8881  }
8882  else
8883  {
8884  // Already on next page.
8885  break;
8886  }
8887  }
8888  }
8889 
8890  // All tests passed: Success.
8891  pAllocationRequest->offset = resultOffset;
8892  pAllocationRequest->sumFreeSize =
8893  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
8894  - resultBaseOffset
8895  - pAllocationRequest->sumItemSize;
8896  // pAllocationRequest->item unused.
8897  return true;
8898  }
8899  }
8900  }
8901 
8902  return false;
8903 }
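// Illustrative sketch (an assumption about the caller, not code from this
// file): a block-level allocator is expected to drive this metadata object
// roughly like this, using only functions defined in this class:
//
//     VmaAllocationRequest request = {};
//     if(metadata.CreateAllocationRequest(
//         currentFrameIndex, frameInUseCount, bufferImageGranularity,
//         allocSize, allocAlignment, false /*upperAddress*/, allocType,
//         false /*canMakeOtherLost*/, 0 /*strategy*/, &request))
//     {
//         metadata.Alloc(request, allocType, allocSize, false /*upperAddress*/, hAllocation);
//     }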
8904 
8905 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
8906  uint32_t currentFrameIndex,
8907  uint32_t frameInUseCount,
8908  VmaAllocationRequest* pAllocationRequest)
8909 {
8910  if(pAllocationRequest->itemsToMakeLostCount == 0)
8911  {
8912  return true;
8913  }
8914 
8915  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
8916 
8917  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8918  size_t index1st = m_1stNullItemsBeginCount;
8919  size_t madeLostCount = 0;
8920  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
8921  {
8922  VMA_ASSERT(index1st < suballocations1st.size());
8923  VmaSuballocation& suballoc = suballocations1st[index1st];
8924  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8925  {
8926  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8927  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
8928  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8929  {
8930  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8931  suballoc.hAllocation = VK_NULL_HANDLE;
8932  m_SumFreeSize += suballoc.size;
8933  ++m_1stNullItemsMiddleCount;
8934  ++madeLostCount;
8935  }
8936  else
8937  {
8938  return false;
8939  }
8940  }
8941  ++index1st;
8942  }
8943 
8944  CleanupAfterFree();
 8945  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
8946 
8947  return true;
8948 }
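// When the request was created with canMakeOtherLost == true, the expected
// sequence (a sketch under the same caller assumption as above) inserts this
// call between the request and the allocation, so the colliding items counted
// in request.itemsToMakeLostCount are actually freed first:
//
//     if(metadata.CreateAllocationRequest(..., true /*canMakeOtherLost*/, 0, &request) &&
//         metadata.MakeRequestedAllocationsLost(currentFrameIndex, frameInUseCount, &request))
//     {
//         metadata.Alloc(request, allocType, allocSize, false, hAllocation);
//     }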
8949 
8950 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8951 {
8952  uint32_t lostAllocationCount = 0;
8953 
8954  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8955  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8956  {
8957  VmaSuballocation& suballoc = suballocations1st[i];
8958  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8959  suballoc.hAllocation->CanBecomeLost() &&
8960  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8961  {
8962  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8963  suballoc.hAllocation = VK_NULL_HANDLE;
8964  ++m_1stNullItemsMiddleCount;
8965  m_SumFreeSize += suballoc.size;
8966  ++lostAllocationCount;
8967  }
8968  }
8969 
8970  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8971  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8972  {
8973  VmaSuballocation& suballoc = suballocations2nd[i];
8974  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8975  suballoc.hAllocation->CanBecomeLost() &&
8976  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8977  {
8978  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8979  suballoc.hAllocation = VK_NULL_HANDLE;
8980  ++m_2ndNullItemsCount;
8981  ++lostAllocationCount;
8982  }
8983  }
8984 
8985  if(lostAllocationCount)
8986  {
8987  CleanupAfterFree();
8988  }
8989 
8990  return lostAllocationCount;
8991 }
8992 
8993 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
8994 {
8995  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8996  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8997  {
8998  const VmaSuballocation& suballoc = suballocations1st[i];
8999  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9000  {
9001  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9002  {
9003  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9004  return VK_ERROR_VALIDATION_FAILED_EXT;
9005  }
9006  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9007  {
9008  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9009  return VK_ERROR_VALIDATION_FAILED_EXT;
9010  }
9011  }
9012  }
9013 
9014  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9015  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9016  {
9017  const VmaSuballocation& suballoc = suballocations2nd[i];
9018  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9019  {
9020  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9021  {
9022  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9023  return VK_ERROR_VALIDATION_FAILED_EXT;
9024  }
9025  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9026  {
9027  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9028  return VK_ERROR_VALIDATION_FAILED_EXT;
9029  }
9030  }
9031  }
9032 
9033  return VK_SUCCESS;
9034 }
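// The checks above rely on the layout written by WriteMagicValueAroundAllocation()
// further below: with VMA_DEBUG_MARGIN > 0 and VMA_DEBUG_DETECT_CORRUPTION
// enabled, every allocation is bracketed by magic values. E.g. for an
// allocation at offset 256 with size 128 and VMA_DEBUG_MARGIN == 16:
//
//     [240, 256): magic   [256, 384): user data   [384, 400): magic
//
// A failed VmaValidateMagicValue() therefore means a write ran past one end.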
9035 
9036 void VmaBlockMetadata_Linear::Alloc(
9037  const VmaAllocationRequest& request,
9038  VmaSuballocationType type,
9039  VkDeviceSize allocSize,
9040  bool upperAddress,
9041  VmaAllocation hAllocation)
9042 {
9043  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9044 
9045  if(upperAddress)
9046  {
9047  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9048  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9049  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9050  suballocations2nd.push_back(newSuballoc);
9051  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9052  }
9053  else
9054  {
9055  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9056 
9057  // First allocation.
9058  if(suballocations1st.empty())
9059  {
9060  suballocations1st.push_back(newSuballoc);
9061  }
9062  else
9063  {
9064  // New allocation at the end of 1st vector.
9065  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9066  {
9067  // Check if it fits before the end of the block.
9068  VMA_ASSERT(request.offset + allocSize <= GetSize());
9069  suballocations1st.push_back(newSuballoc);
9070  }
9071  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
9072  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
9073  {
9074  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9075 
9076  switch(m_2ndVectorMode)
9077  {
9078  case SECOND_VECTOR_EMPTY:
9079  // First allocation from second part ring buffer.
9080  VMA_ASSERT(suballocations2nd.empty());
9081  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
9082  break;
9083  case SECOND_VECTOR_RING_BUFFER:
9084  // 2-part ring buffer is already started.
9085  VMA_ASSERT(!suballocations2nd.empty());
9086  break;
9087  case SECOND_VECTOR_DOUBLE_STACK:
9088  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
9089  break;
9090  default:
9091  VMA_ASSERT(0);
9092  }
9093 
9094  suballocations2nd.push_back(newSuballoc);
9095  }
9096  else
9097  {
9098  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
9099  }
9100  }
9101  }
9102 
9103  m_SumFreeSize -= newSuballoc.size;
9104 }
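// Mode transitions performed here, as enforced by the asserts above:
// SECOND_VECTOR_EMPTY -> SECOND_VECTOR_DOUBLE_STACK on the first upper-address
// allocation, and SECOND_VECTOR_EMPTY -> SECOND_VECTOR_RING_BUFFER on the
// first allocation that wraps around below the beginning of the 1st vector.
// The two modes are mutually exclusive for the current contents of the block,
// which is exactly what the CRITICAL ERROR asserts guard against.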
9105 
9106 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9107 {
9108  FreeAtOffset(allocation->GetOffset());
9109 }
9110 
9111 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
9112 {
9113  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9114  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9115 
9116  if(!suballocations1st.empty())
9117  {
9118  // First allocation: Mark it as next empty at the beginning.
9119  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9120  if(firstSuballoc.offset == offset)
9121  {
9122  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9123  firstSuballoc.hAllocation = VK_NULL_HANDLE;
9124  m_SumFreeSize += firstSuballoc.size;
9125  ++m_1stNullItemsBeginCount;
9126  CleanupAfterFree();
9127  return;
9128  }
9129  }
9130 
9131  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
9132  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
9133  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9134  {
9135  VmaSuballocation& lastSuballoc = suballocations2nd.back();
9136  if(lastSuballoc.offset == offset)
9137  {
9138  m_SumFreeSize += lastSuballoc.size;
9139  suballocations2nd.pop_back();
9140  CleanupAfterFree();
9141  return;
9142  }
9143  }
9144  // Last allocation in 1st vector.
9145  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
9146  {
9147  VmaSuballocation& lastSuballoc = suballocations1st.back();
9148  if(lastSuballoc.offset == offset)
9149  {
9150  m_SumFreeSize += lastSuballoc.size;
9151  suballocations1st.pop_back();
9152  CleanupAfterFree();
9153  return;
9154  }
9155  }
9156 
9157  // Item from the middle of 1st vector.
9158  {
9159  VmaSuballocation refSuballoc;
9160  refSuballoc.offset = offset;
 9161  // Rest of members intentionally left uninitialized for better performance.
9162  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
9163  suballocations1st.begin() + m_1stNullItemsBeginCount,
9164  suballocations1st.end(),
9165  refSuballoc);
9166  if(it != suballocations1st.end())
9167  {
9168  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9169  it->hAllocation = VK_NULL_HANDLE;
9170  ++m_1stNullItemsMiddleCount;
9171  m_SumFreeSize += it->size;
9172  CleanupAfterFree();
9173  return;
9174  }
9175  }
9176 
9177  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
9178  {
9179  // Item from the middle of 2nd vector.
9180  VmaSuballocation refSuballoc;
9181  refSuballoc.offset = offset;
 9182  // Rest of members intentionally left uninitialized for better performance.
9183  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
9184  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
9185  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
9186  if(it != suballocations2nd.end())
9187  {
9188  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9189  it->hAllocation = VK_NULL_HANDLE;
9190  ++m_2ndNullItemsCount;
9191  m_SumFreeSize += it->size;
9192  CleanupAfterFree();
9193  return;
9194  }
9195  }
9196 
9197  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
9198 }
9199 
9200 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9201 {
9202  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9203  const size_t suballocCount = AccessSuballocations1st().size();
9204  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9205 }
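// Worked example of the heuristic above: with 100 suballocations of which 60
// are null, 60 * 2 == 120 >= (100 - 60) * 3 == 120, so compaction triggers;
// with 40 null out of 100, 40 * 2 == 80 < 60 * 3 == 180, so it does not.
// In words: compact once null items outnumber live items at least 3:2, and
// only for vectors longer than 32 elements.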
9206 
9207 void VmaBlockMetadata_Linear::CleanupAfterFree()
9208 {
9209  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9210  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9211 
9212  if(IsEmpty())
9213  {
9214  suballocations1st.clear();
9215  suballocations2nd.clear();
9216  m_1stNullItemsBeginCount = 0;
9217  m_1stNullItemsMiddleCount = 0;
9218  m_2ndNullItemsCount = 0;
9219  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9220  }
9221  else
9222  {
9223  const size_t suballoc1stCount = suballocations1st.size();
9224  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9225  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
9226 
9227  // Find more null items at the beginning of 1st vector.
9228  while(m_1stNullItemsBeginCount < suballoc1stCount &&
9229  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9230  {
9231  ++m_1stNullItemsBeginCount;
9232  --m_1stNullItemsMiddleCount;
9233  }
9234 
9235  // Find more null items at the end of 1st vector.
9236  while(m_1stNullItemsMiddleCount > 0 &&
9237  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
9238  {
9239  --m_1stNullItemsMiddleCount;
9240  suballocations1st.pop_back();
9241  }
9242 
9243  // Find more null items at the end of 2nd vector.
9244  while(m_2ndNullItemsCount > 0 &&
9245  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
9246  {
9247  --m_2ndNullItemsCount;
9248  suballocations2nd.pop_back();
9249  }
9250 
9251  if(ShouldCompact1st())
9252  {
9253  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
9254  size_t srcIndex = m_1stNullItemsBeginCount;
9255  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
9256  {
9257  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
9258  {
9259  ++srcIndex;
9260  }
9261  if(dstIndex != srcIndex)
9262  {
9263  suballocations1st[dstIndex] = suballocations1st[srcIndex];
9264  }
9265  ++srcIndex;
9266  }
9267  suballocations1st.resize(nonNullItemCount);
9268  m_1stNullItemsBeginCount = 0;
9269  m_1stNullItemsMiddleCount = 0;
9270  }
9271 
9272  // 2nd vector became empty.
9273  if(suballocations2nd.empty())
9274  {
9275  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9276  }
9277 
9278  // 1st vector became empty.
9279  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
9280  {
9281  suballocations1st.clear();
9282  m_1stNullItemsBeginCount = 0;
9283 
9284  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9285  {
9286  // Swap 1st with 2nd. Now 2nd is empty.
9287  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9288  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
9289  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
9290  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9291  {
9292  ++m_1stNullItemsBeginCount;
9293  --m_1stNullItemsMiddleCount;
9294  }
9295  m_2ndNullItemsCount = 0;
9296  m_1stVectorIndex ^= 1;
9297  }
9298  }
9299  }
9300 
9301  VMA_HEAVY_ASSERT(Validate());
9302 }
9303 
9304 
 9305 ////////////////////////////////////////////////////////////////////////////////
 9306 // class VmaBlockMetadata_Buddy
9307 
9308 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
9309  VmaBlockMetadata(hAllocator),
9310  m_Root(VMA_NULL),
9311  m_AllocationCount(0),
9312  m_FreeCount(1),
9313  m_SumFreeSize(0)
9314 {
9315  memset(m_FreeList, 0, sizeof(m_FreeList));
9316 }
9317 
9318 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9319 {
9320  DeleteNode(m_Root);
9321 }
9322 
9323 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9324 {
9325  VmaBlockMetadata::Init(size);
9326 
9327  m_UsableSize = VmaPrevPow2(size);
9328  m_SumFreeSize = m_UsableSize;
9329 
9330  // Calculate m_LevelCount.
9331  m_LevelCount = 1;
9332  while(m_LevelCount < MAX_LEVELS &&
9333  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
9334  {
9335  ++m_LevelCount;
9336  }
9337 
9338  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
9339  rootNode->offset = 0;
9340  rootNode->type = Node::TYPE_FREE;
9341  rootNode->parent = VMA_NULL;
9342  rootNode->buddy = VMA_NULL;
9343 
9344  m_Root = rootNode;
9345  AddToFreeListFront(0, rootNode);
9346 }
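// Worked example of this initialization (assuming MIN_NODE_SIZE == 32; see the
// class definition for the actual constant): for size == 1000,
// m_UsableSize == VmaPrevPow2(1000) == 512 and the level node sizes are
// 512, 256, 128, 64, 32, so m_LevelCount == 5. The remaining
// 1000 - 512 == 488 bytes form the unusable tail reported by GetUnusableSize().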
9347 
9348 bool VmaBlockMetadata_Buddy::Validate() const
9349 {
9350  // Validate tree.
9351  ValidationContext ctx;
9352  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9353  {
9354  VMA_VALIDATE(false && "ValidateNode failed.");
9355  }
9356  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9357  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9358 
9359  // Validate free node lists.
9360  for(uint32_t level = 0; level < m_LevelCount; ++level)
9361  {
9362  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9363  m_FreeList[level].front->free.prev == VMA_NULL);
9364 
9365  for(Node* node = m_FreeList[level].front;
9366  node != VMA_NULL;
9367  node = node->free.next)
9368  {
9369  VMA_VALIDATE(node->type == Node::TYPE_FREE);
9370 
9371  if(node->free.next == VMA_NULL)
9372  {
9373  VMA_VALIDATE(m_FreeList[level].back == node);
9374  }
9375  else
9376  {
9377  VMA_VALIDATE(node->free.next->free.prev == node);
9378  }
9379  }
9380  }
9381 
 9382  // Validate that free lists at higher levels are empty.
9383  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9384  {
9385  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9386  }
9387 
9388  return true;
9389 }
9390 
9391 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9392 {
9393  for(uint32_t level = 0; level < m_LevelCount; ++level)
9394  {
9395  if(m_FreeList[level].front != VMA_NULL)
9396  {
9397  return LevelToNodeSize(level);
9398  }
9399  }
9400  return 0;
9401 }
9402 
9403 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9404 {
9405  const VkDeviceSize unusableSize = GetUnusableSize();
9406 
9407  outInfo.blockCount = 1;
9408 
9409  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9410  outInfo.usedBytes = outInfo.unusedBytes = 0;
9411 
9412  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9413  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9414  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9415 
9416  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
9417 
9418  if(unusableSize > 0)
9419  {
9420  ++outInfo.unusedRangeCount;
9421  outInfo.unusedBytes += unusableSize;
9422  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
9423  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
9424  }
9425 }
9426 
9427 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9428 {
9429  const VkDeviceSize unusableSize = GetUnusableSize();
9430 
9431  inoutStats.size += GetSize();
9432  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9433  inoutStats.allocationCount += m_AllocationCount;
9434  inoutStats.unusedRangeCount += m_FreeCount;
9435  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9436 
9437  if(unusableSize > 0)
9438  {
9439  ++inoutStats.unusedRangeCount;
9440  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9441  }
9442 }
9443 
9444 #if VMA_STATS_STRING_ENABLED
9445 
9446 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9447 {
9448  // TODO optimize
9449  VmaStatInfo stat;
9450  CalcAllocationStatInfo(stat);
9451 
9452  PrintDetailedMap_Begin(
9453  json,
9454  stat.unusedBytes,
9455  stat.allocationCount,
9456  stat.unusedRangeCount);
9457 
9458  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9459 
9460  const VkDeviceSize unusableSize = GetUnusableSize();
9461  if(unusableSize > 0)
9462  {
9463  PrintDetailedMap_UnusedRange(json,
9464  m_UsableSize, // offset
9465  unusableSize); // size
9466  }
9467 
9468  PrintDetailedMap_End(json);
9469 }
9470 
9471 #endif // #if VMA_STATS_STRING_ENABLED
9472 
9473 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9474  uint32_t currentFrameIndex,
9475  uint32_t frameInUseCount,
9476  VkDeviceSize bufferImageGranularity,
9477  VkDeviceSize allocSize,
9478  VkDeviceSize allocAlignment,
9479  bool upperAddress,
9480  VmaSuballocationType allocType,
9481  bool canMakeOtherLost,
9482  uint32_t strategy,
9483  VmaAllocationRequest* pAllocationRequest)
9484 {
9485  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9486 
 9487  // Simple way to respect bufferImageGranularity; may be optimized some day.
 9488  // Whenever the allocation might be an OPTIMAL image, round its alignment and size up to bufferImageGranularity.
9489  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9490  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9491  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9492  {
9493  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9494  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9495  }
9496 
9497  if(allocSize > m_UsableSize)
9498  {
9499  return false;
9500  }
9501 
9502  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9503  for(uint32_t level = targetLevel + 1; level--; )
9504  {
9505  for(Node* freeNode = m_FreeList[level].front;
9506  freeNode != VMA_NULL;
9507  freeNode = freeNode->free.next)
9508  {
9509  if(freeNode->offset % allocAlignment == 0)
9510  {
9511  pAllocationRequest->offset = freeNode->offset;
9512  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9513  pAllocationRequest->sumItemSize = 0;
9514  pAllocationRequest->itemsToMakeLostCount = 0;
9515  pAllocationRequest->customData = (void*)(uintptr_t)level;
9516  return true;
9517  }
9518  }
9519  }
9520 
9521  return false;
9522 }
9523 
9524 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9525  uint32_t currentFrameIndex,
9526  uint32_t frameInUseCount,
9527  VmaAllocationRequest* pAllocationRequest)
9528 {
9529  /*
9530  Lost allocations are not supported in buddy allocator at the moment.
9531  Support might be added in the future.
9532  */
9533  return pAllocationRequest->itemsToMakeLostCount == 0;
9534 }
9535 
9536 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9537 {
9538  /*
9539  Lost allocations are not supported in buddy allocator at the moment.
9540  Support might be added in the future.
9541  */
9542  return 0;
9543 }
9544 
9545 void VmaBlockMetadata_Buddy::Alloc(
9546  const VmaAllocationRequest& request,
9547  VmaSuballocationType type,
9548  VkDeviceSize allocSize,
9549  bool upperAddress,
9550  VmaAllocation hAllocation)
9551 {
9552  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9553  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9554 
9555  Node* currNode = m_FreeList[currLevel].front;
9556  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9557  while(currNode->offset != request.offset)
9558  {
9559  currNode = currNode->free.next;
9560  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9561  }
9562 
9563  // Go down, splitting free nodes.
9564  while(currLevel < targetLevel)
9565  {
9566  // currNode is already first free node at currLevel.
9567  // Remove it from list of free nodes at this currLevel.
9568  RemoveFromFreeList(currLevel, currNode);
9569 
9570  const uint32_t childrenLevel = currLevel + 1;
9571 
9572  // Create two free sub-nodes.
9573  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
9574  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
9575 
9576  leftChild->offset = currNode->offset;
9577  leftChild->type = Node::TYPE_FREE;
9578  leftChild->parent = currNode;
9579  leftChild->buddy = rightChild;
9580 
9581  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9582  rightChild->type = Node::TYPE_FREE;
9583  rightChild->parent = currNode;
9584  rightChild->buddy = leftChild;
9585 
9586  // Convert current currNode to split type.
9587  currNode->type = Node::TYPE_SPLIT;
9588  currNode->split.leftChild = leftChild;
9589 
9590  // Add child nodes to free list. Order is important!
9591  AddToFreeListFront(childrenLevel, rightChild);
9592  AddToFreeListFront(childrenLevel, leftChild);
9593 
9594  ++m_FreeCount;
9595  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
9596  ++currLevel;
9597  currNode = m_FreeList[currLevel].front;
9598 
9599  /*
9600  We can be sure that currNode, as left child of node previously split,
 9601  also fulfills the alignment requirement.
9602  */
9603  }
9604 
9605  // Remove from free list.
9606  VMA_ASSERT(currLevel == targetLevel &&
9607  currNode != VMA_NULL &&
9608  currNode->type == Node::TYPE_FREE);
9609  RemoveFromFreeList(currLevel, currNode);
9610 
9611  // Convert to allocation node.
9612  currNode->type = Node::TYPE_ALLOCATION;
9613  currNode->allocation.alloc = hAllocation;
9614 
9615  ++m_AllocationCount;
9616  --m_FreeCount;
9617  m_SumFreeSize -= allocSize;
9618 }
9619 
9620 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9621 {
9622  if(node->type == Node::TYPE_SPLIT)
9623  {
9624  DeleteNode(node->split.leftChild->buddy);
9625  DeleteNode(node->split.leftChild);
9626  }
9627 
9628  vma_delete(GetAllocationCallbacks(), node);
9629 }
9630 
9631 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9632 {
9633  VMA_VALIDATE(level < m_LevelCount);
9634  VMA_VALIDATE(curr->parent == parent);
9635  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9636  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9637  switch(curr->type)
9638  {
9639  case Node::TYPE_FREE:
9640  // curr->free.prev, next are validated separately.
9641  ctx.calculatedSumFreeSize += levelNodeSize;
9642  ++ctx.calculatedFreeCount;
9643  break;
9644  case Node::TYPE_ALLOCATION:
9645  ++ctx.calculatedAllocationCount;
9646  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9647  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
9648  break;
9649  case Node::TYPE_SPLIT:
9650  {
9651  const uint32_t childrenLevel = level + 1;
9652  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9653  const Node* const leftChild = curr->split.leftChild;
9654  VMA_VALIDATE(leftChild != VMA_NULL);
9655  VMA_VALIDATE(leftChild->offset == curr->offset);
9656  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9657  {
9658  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9659  }
9660  const Node* const rightChild = leftChild->buddy;
9661  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9662  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9663  {
9664  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9665  }
9666  }
9667  break;
9668  default:
9669  return false;
9670  }
9671 
9672  return true;
9673 }
9674 
9675 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9676 {
 9677  // I know this could be optimized somehow e.g. by using std::bit_width (formerly std::log2p1) from C++20.
9678  uint32_t level = 0;
9679  VkDeviceSize currLevelNodeSize = m_UsableSize;
9680  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9681  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9682  {
9683  ++level;
9684  currLevelNodeSize = nextLevelNodeSize;
9685  nextLevelNodeSize = currLevelNodeSize >> 1;
9686  }
9687  return level;
9688 }
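// Worked example (using m_UsableSize == 512 and m_LevelCount == 5 from the
// Init() example above): allocSize == 100 returns level 2, i.e. node size 128,
// the smallest node that still fits 100 bytes; allocSize == 600 would return
// level 0, but CreateAllocationRequest() rejects it earlier via the
// allocSize > m_UsableSize check.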
9689 
9690 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9691 {
9692  // Find node and level.
9693  Node* node = m_Root;
9694  VkDeviceSize nodeOffset = 0;
9695  uint32_t level = 0;
9696  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9697  while(node->type == Node::TYPE_SPLIT)
9698  {
9699  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9700  if(offset < nodeOffset + nextLevelSize)
9701  {
9702  node = node->split.leftChild;
9703  }
9704  else
9705  {
9706  node = node->split.leftChild->buddy;
9707  nodeOffset += nextLevelSize;
9708  }
9709  ++level;
9710  levelNodeSize = nextLevelSize;
9711  }
9712 
9713  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9714  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9715 
9716  ++m_FreeCount;
9717  --m_AllocationCount;
9718  m_SumFreeSize += alloc->GetSize();
9719 
9720  node->type = Node::TYPE_FREE;
9721 
9722  // Join free nodes if possible.
9723  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9724  {
9725  RemoveFromFreeList(level, node->buddy);
9726  Node* const parent = node->parent;
9727 
9728  vma_delete(GetAllocationCallbacks(), node->buddy);
9729  vma_delete(GetAllocationCallbacks(), node);
9730  parent->type = Node::TYPE_FREE;
9731 
9732  node = parent;
9733  --level;
9734  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9735  --m_FreeCount;
9736  }
9737 
9738  AddToFreeListFront(level, node);
9739 }
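// Example of the merge loop above: freeing a 128-byte node whose buddy is
// already free removes that buddy from its free list, deletes both children,
// and marks their 256-byte parent free; if the parent's buddy is free as well,
// the join repeats one level higher. This is the classic buddy-allocator
// coalescing step, bounded by the height of the tree.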
9740 
9741 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9742 {
9743  switch(node->type)
9744  {
9745  case Node::TYPE_FREE:
9746  ++outInfo.unusedRangeCount;
9747  outInfo.unusedBytes += levelNodeSize;
9748  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
 9749  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
9750  break;
9751  case Node::TYPE_ALLOCATION:
9752  {
9753  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9754  ++outInfo.allocationCount;
9755  outInfo.usedBytes += allocSize;
9756  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
 9757  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
9758 
9759  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9760  if(unusedRangeSize > 0)
9761  {
9762  ++outInfo.unusedRangeCount;
9763  outInfo.unusedBytes += unusedRangeSize;
9764  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
 9765  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9766  }
9767  }
9768  break;
9769  case Node::TYPE_SPLIT:
9770  {
9771  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9772  const Node* const leftChild = node->split.leftChild;
9773  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9774  const Node* const rightChild = leftChild->buddy;
9775  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9776  }
9777  break;
9778  default:
9779  VMA_ASSERT(0);
9780  }
9781 }
9782 
9783 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9784 {
9785  VMA_ASSERT(node->type == Node::TYPE_FREE);
9786 
9787  // List is empty.
9788  Node* const frontNode = m_FreeList[level].front;
9789  if(frontNode == VMA_NULL)
9790  {
9791  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9792  node->free.prev = node->free.next = VMA_NULL;
9793  m_FreeList[level].front = m_FreeList[level].back = node;
9794  }
9795  else
9796  {
9797  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9798  node->free.prev = VMA_NULL;
9799  node->free.next = frontNode;
9800  frontNode->free.prev = node;
9801  m_FreeList[level].front = node;
9802  }
9803 }
9804 
9805 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9806 {
9807  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9808 
9809  // It is at the front.
9810  if(node->free.prev == VMA_NULL)
9811  {
9812  VMA_ASSERT(m_FreeList[level].front == node);
9813  m_FreeList[level].front = node->free.next;
9814  }
9815  else
9816  {
9817  Node* const prevFreeNode = node->free.prev;
9818  VMA_ASSERT(prevFreeNode->free.next == node);
9819  prevFreeNode->free.next = node->free.next;
9820  }
9821 
9822  // It is at the back.
9823  if(node->free.next == VMA_NULL)
9824  {
9825  VMA_ASSERT(m_FreeList[level].back == node);
9826  m_FreeList[level].back = node->free.prev;
9827  }
9828  else
9829  {
9830  Node* const nextFreeNode = node->free.next;
9831  VMA_ASSERT(nextFreeNode->free.prev == node);
9832  nextFreeNode->free.prev = node->free.prev;
9833  }
9834 }
9835 
9836 #if VMA_STATS_STRING_ENABLED
9837 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
9838 {
9839  switch(node->type)
9840  {
9841  case Node::TYPE_FREE:
9842  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
9843  break;
9844  case Node::TYPE_ALLOCATION:
9845  {
9846  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
9847  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9848  if(allocSize < levelNodeSize)
9849  {
9850  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
9851  }
9852  }
9853  break;
9854  case Node::TYPE_SPLIT:
9855  {
9856  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9857  const Node* const leftChild = node->split.leftChild;
9858  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
9859  const Node* const rightChild = leftChild->buddy;
9860  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
9861  }
9862  break;
9863  default:
9864  VMA_ASSERT(0);
9865  }
9866 }
9867 #endif // #if VMA_STATS_STRING_ENABLED
9868 
9869 
 9870 ////////////////////////////////////////////////////////////////////////////////
 9871 // class VmaDeviceMemoryBlock
9872 
9873 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
9874  m_pMetadata(VMA_NULL),
9875  m_MemoryTypeIndex(UINT32_MAX),
9876  m_Id(0),
9877  m_hMemory(VK_NULL_HANDLE),
9878  m_MapCount(0),
9879  m_pMappedData(VMA_NULL)
9880 {
9881 }
9882 
9883 void VmaDeviceMemoryBlock::Init(
9884  VmaAllocator hAllocator,
9885  uint32_t newMemoryTypeIndex,
9886  VkDeviceMemory newMemory,
9887  VkDeviceSize newSize,
9888  uint32_t id,
9889  uint32_t algorithm)
9890 {
9891  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
9892 
9893  m_MemoryTypeIndex = newMemoryTypeIndex;
9894  m_Id = id;
9895  m_hMemory = newMemory;
9896 
9897  switch(algorithm)
9898  {
 9899  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
 9900  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
 9901  break;
 9902  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
 9903  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
 9904  break;
9905  default:
9906  VMA_ASSERT(0);
9907  // Fall-through.
9908  case 0:
9909  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
9910  }
9911  m_pMetadata->Init(newSize);
9912 }
9913 
9914 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
9915 {
9916  // This is the most important assert in the entire library.
9917  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
9918  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
9919 
9920  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
9921  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
9922  m_hMemory = VK_NULL_HANDLE;
9923 
9924  vma_delete(allocator, m_pMetadata);
9925  m_pMetadata = VMA_NULL;
9926 }
9927 
9928 bool VmaDeviceMemoryBlock::Validate() const
9929 {
9930  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
9931  (m_pMetadata->GetSize() != 0));
9932 
9933  return m_pMetadata->Validate();
9934 }
9935 
9936 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
9937 {
9938  void* pData = nullptr;
9939  VkResult res = Map(hAllocator, 1, &pData);
9940  if(res != VK_SUCCESS)
9941  {
9942  return res;
9943  }
9944 
9945  res = m_pMetadata->CheckCorruption(pData);
9946 
9947  Unmap(hAllocator, 1);
9948 
9949  return res;
9950 }
9951 
9952 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
9953 {
9954  if(count == 0)
9955  {
9956  return VK_SUCCESS;
9957  }
9958 
9959  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9960  if(m_MapCount != 0)
9961  {
9962  m_MapCount += count;
9963  VMA_ASSERT(m_pMappedData != VMA_NULL);
9964  if(ppData != VMA_NULL)
9965  {
9966  *ppData = m_pMappedData;
9967  }
9968  return VK_SUCCESS;
9969  }
9970  else
9971  {
9972  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
9973  hAllocator->m_hDevice,
9974  m_hMemory,
9975  0, // offset
9976  VK_WHOLE_SIZE,
9977  0, // flags
9978  &m_pMappedData);
9979  if(result == VK_SUCCESS)
9980  {
9981  if(ppData != VMA_NULL)
9982  {
9983  *ppData = m_pMappedData;
9984  }
9985  m_MapCount = count;
9986  }
9987  return result;
9988  }
9989 }
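// Mapping is reference-counted per block: only the first Map() actually calls
// vkMapMemory and only the matching last Unmap() calls vkUnmapMemory; nested
// calls just adjust m_MapCount. A usage sketch (hypothetical caller):
//
//     void* pData = VMA_NULL;
//     if(block.Map(hAllocator, 1, &pData) == VK_SUCCESS)
//     {
//         // ... write through pData at the allocation's offset ...
//         block.Unmap(hAllocator, 1);
//     }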
9990 
9991 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
9992 {
9993  if(count == 0)
9994  {
9995  return;
9996  }
9997 
9998  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9999  if(m_MapCount >= count)
10000  {
10001  m_MapCount -= count;
10002  if(m_MapCount == 0)
10003  {
10004  m_pMappedData = VMA_NULL;
10005  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10006  }
10007  }
10008  else
10009  {
10010  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10011  }
10012 }
10013 
10014 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10015 {
10016  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10017  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10018 
10019  void* pData;
10020  VkResult res = Map(hAllocator, 1, &pData);
10021  if(res != VK_SUCCESS)
10022  {
10023  return res;
10024  }
10025 
10026  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10027  VmaWriteMagicValue(pData, allocOffset + allocSize);
10028 
10029  Unmap(hAllocator, 1);
10030 
10031  return VK_SUCCESS;
10032 }
10033 
10034 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10035 {
10036  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10037  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10038 
10039  void* pData;
10040  VkResult res = Map(hAllocator, 1, &pData);
10041  if(res != VK_SUCCESS)
10042  {
10043  return res;
10044  }
10045 
10046  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10047  {
10048  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10049  }
10050  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10051  {
10052  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10053  }
10054 
10055  Unmap(hAllocator, 1);
10056 
10057  return VK_SUCCESS;
10058 }
10059 
10060 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10061  const VmaAllocator hAllocator,
10062  const VmaAllocation hAllocation,
10063  VkBuffer hBuffer)
10064 {
10065  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10066  hAllocation->GetBlock() == this);
10067  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10068  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10069  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10070  hAllocator->m_hDevice,
10071  hBuffer,
10072  m_hMemory,
10073  hAllocation->GetOffset());
10074 }
10075 
10076 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10077  const VmaAllocator hAllocator,
10078  const VmaAllocation hAllocation,
10079  VkImage hImage)
10080 {
10081  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10082  hAllocation->GetBlock() == this);
10083  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10084  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10085  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10086  hAllocator->m_hDevice,
10087  hImage,
10088  m_hMemory,
10089  hAllocation->GetOffset());
10090 }
10091 
10092 static void InitStatInfo(VmaStatInfo& outInfo)
10093 {
10094  memset(&outInfo, 0, sizeof(outInfo));
10095  outInfo.allocationSizeMin = UINT64_MAX;
10096  outInfo.unusedRangeSizeMin = UINT64_MAX;
10097 }
10098 
10099 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10100 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10101 {
10102  inoutInfo.blockCount += srcInfo.blockCount;
10103  inoutInfo.allocationCount += srcInfo.allocationCount;
10104  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10105  inoutInfo.usedBytes += srcInfo.usedBytes;
10106  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10107  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10108  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10109  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10110  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10111 }
10112 
10113 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10114 {
10115  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10116  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10117  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10118  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10119 }
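// Worked example (editorial): merging two VmaStatInfo and computing averages.
//
//     A: allocationCount == 2, usedBytes == 300, allocationSizeMin/Max == 100/200
//     B: allocationCount == 1, usedBytes ==  50, allocationSizeMin/Max ==  50/50
//     VmaAddStatInfo(A, B)          -> allocationCount == 3, usedBytes == 350,
//                                      allocationSizeMin == 50, allocationSizeMax == 200
//     VmaPostprocessCalcStatInfo(A) -> allocationSizeAvg == VmaRoundDiv(350, 3) == 117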
10120 
10121 VmaPool_T::VmaPool_T(
10122  VmaAllocator hAllocator,
10123  const VmaPoolCreateInfo& createInfo,
10124  VkDeviceSize preferredBlockSize) :
10125  m_BlockVector(
10126  hAllocator,
10127  createInfo.memoryTypeIndex,
10128  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
10129  createInfo.minBlockCount,
10130  createInfo.maxBlockCount,
10131  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
10132  createInfo.frameInUseCount,
10133  true, // isCustomPool
10134  createInfo.blockSize != 0, // explicitBlockSize
10135  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
10136  m_Id(0)
10137 {
10138 }
10139 
10140 VmaPool_T::~VmaPool_T()
10141 {
10142 }
10143 
10144 #if VMA_STATS_STRING_ENABLED
10145 
10146 #endif // #if VMA_STATS_STRING_ENABLED
10147 
10148 VmaBlockVector::VmaBlockVector(
10149  VmaAllocator hAllocator,
10150  uint32_t memoryTypeIndex,
10151  VkDeviceSize preferredBlockSize,
10152  size_t minBlockCount,
10153  size_t maxBlockCount,
10154  VkDeviceSize bufferImageGranularity,
10155  uint32_t frameInUseCount,
10156  bool isCustomPool,
10157  bool explicitBlockSize,
10158  uint32_t algorithm) :
10159  m_hAllocator(hAllocator),
10160  m_MemoryTypeIndex(memoryTypeIndex),
10161  m_PreferredBlockSize(preferredBlockSize),
10162  m_MinBlockCount(minBlockCount),
10163  m_MaxBlockCount(maxBlockCount),
10164  m_BufferImageGranularity(bufferImageGranularity),
10165  m_FrameInUseCount(frameInUseCount),
10166  m_IsCustomPool(isCustomPool),
10167  m_ExplicitBlockSize(explicitBlockSize),
10168  m_Algorithm(algorithm),
10169  m_HasEmptyBlock(false),
10170  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
10171  m_pDefragmentator(VMA_NULL),
10172  m_NextBlockId(0)
10173 {
10174 }
10175 
10176 VmaBlockVector::~VmaBlockVector()
10177 {
10178  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10179 
10180  for(size_t i = m_Blocks.size(); i--; )
10181  {
10182  m_Blocks[i]->Destroy(m_hAllocator);
10183  vma_delete(m_hAllocator, m_Blocks[i]);
10184  }
10185 }
10186 
10187 VkResult VmaBlockVector::CreateMinBlocks()
10188 {
10189  for(size_t i = 0; i < m_MinBlockCount; ++i)
10190  {
10191  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10192  if(res != VK_SUCCESS)
10193  {
10194  return res;
10195  }
10196  }
10197  return VK_SUCCESS;
10198 }
10199 
10200 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10201 {
10202  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10203 
10204  const size_t blockCount = m_Blocks.size();
10205 
10206  pStats->size = 0;
10207  pStats->unusedSize = 0;
10208  pStats->allocationCount = 0;
10209  pStats->unusedRangeCount = 0;
10210  pStats->unusedRangeSizeMax = 0;
10211  pStats->blockCount = blockCount;
10212 
10213  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10214  {
10215  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10216  VMA_ASSERT(pBlock);
10217  VMA_HEAVY_ASSERT(pBlock->Validate());
10218  pBlock->m_pMetadata->AddPoolStats(*pStats);
10219  }
10220 }
10221 
10222 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10223 {
10224  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10225  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10226  (VMA_DEBUG_MARGIN > 0) &&
10227  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10228 }
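// Hedged usage sketch: corruption detection is an opt-in, compile-time
// feature. Both macros must be defined before including this header, and the
// check above additionally requires the memory type to be HOST_VISIBLE and
// HOST_COHERENT:
//
//     #define VMA_DEBUG_MARGIN 16
//     #define VMA_DEBUG_DETECT_CORRUPTION 1
//     #include "vk_mem_alloc.h"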
10229 
10230 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10231 
10232 VkResult VmaBlockVector::Allocate(
10233  VmaPool hCurrentPool,
10234  uint32_t currentFrameIndex,
10235  VkDeviceSize size,
10236  VkDeviceSize alignment,
10237  const VmaAllocationCreateInfo& createInfo,
10238  VmaSuballocationType suballocType,
10239  VmaAllocation* pAllocation)
10240 {
10241  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10242  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10243  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10244  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10245  const bool canCreateNewBlock =
10246  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10247  (m_Blocks.size() < m_MaxBlockCount);
10248  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10249 
10250  // If linearAlgorithm is used, canMakeOtherLost is available only when used as a ring buffer,
10251  // which in turn is available only when maxBlockCount = 1.
10252  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10253  {
10254  canMakeOtherLost = false;
10255  }
10256 
10257  // Upper address can only be used with the linear allocator and within a single memory block.
10258  if(isUpperAddress &&
10259  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10260  {
10261  return VK_ERROR_FEATURE_NOT_PRESENT;
10262  }
10263 
10264  // Validate strategy.
10265  switch(strategy)
10266  {
10267  case 0:
10268  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
10269  break;
10270  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
10271  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
10272  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
10273  break;
10274  default:
10275  return VK_ERROR_FEATURE_NOT_PRESENT;
10276  }
10277 
10278  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
10279  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10280  {
10281  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10282  }
10283 
10284  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10285 
10286  /*
10287  Under certain conditions, this whole section can be skipped for optimization, so
10288  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10289  e.g. for custom pools with linear algorithm.
10290  */
10291  if(!canMakeOtherLost || canCreateNewBlock)
10292  {
10293  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10294  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10295  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
10296 
10297  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10298  {
10299  // Use only last block.
10300  if(!m_Blocks.empty())
10301  {
10302  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10303  VMA_ASSERT(pCurrBlock);
10304  VkResult res = AllocateFromBlock(
10305  pCurrBlock,
10306  hCurrentPool,
10307  currentFrameIndex,
10308  size,
10309  alignment,
10310  allocFlagsCopy,
10311  createInfo.pUserData,
10312  suballocType,
10313  strategy,
10314  pAllocation);
10315  if(res == VK_SUCCESS)
10316  {
10317  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10318  return VK_SUCCESS;
10319  }
10320  }
10321  }
10322  else
10323  {
10324  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10325  {
10326  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10327  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10328  {
10329  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10330  VMA_ASSERT(pCurrBlock);
10331  VkResult res = AllocateFromBlock(
10332  pCurrBlock,
10333  hCurrentPool,
10334  currentFrameIndex,
10335  size,
10336  alignment,
10337  allocFlagsCopy,
10338  createInfo.pUserData,
10339  suballocType,
10340  strategy,
10341  pAllocation);
10342  if(res == VK_SUCCESS)
10343  {
10344  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10345  return VK_SUCCESS;
10346  }
10347  }
10348  }
10349  else // WORST_FIT, FIRST_FIT
10350  {
10351  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10352  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10353  {
10354  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10355  VMA_ASSERT(pCurrBlock);
10356  VkResult res = AllocateFromBlock(
10357  pCurrBlock,
10358  hCurrentPool,
10359  currentFrameIndex,
10360  size,
10361  alignment,
10362  allocFlagsCopy,
10363  createInfo.pUserData,
10364  suballocType,
10365  strategy,
10366  pAllocation);
10367  if(res == VK_SUCCESS)
10368  {
10369  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10370  return VK_SUCCESS;
10371  }
10372  }
10373  }
10374  }
10375 
10376  // 2. Try to create new block.
10377  if(canCreateNewBlock)
10378  {
10379  // Calculate optimal size for new block.
10380  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10381  uint32_t newBlockSizeShift = 0;
10382  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10383 
10384  if(!m_ExplicitBlockSize)
10385  {
10386  // Allocate 1/8, 1/4, 1/2 as first blocks.
10387  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10388  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10389  {
10390  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10391  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10392  {
10393  newBlockSize = smallerNewBlockSize;
10394  ++newBlockSizeShift;
10395  }
10396  else
10397  {
10398  break;
10399  }
10400  }
10401  }
10402 
10403  size_t newBlockIndex = 0;
10404  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10405  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10406  if(!m_ExplicitBlockSize)
10407  {
10408  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10409  {
10410  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10411  if(smallerNewBlockSize >= size)
10412  {
10413  newBlockSize = smallerNewBlockSize;
10414  ++newBlockSizeShift;
10415  res = CreateBlock(newBlockSize, &newBlockIndex);
10416  }
10417  else
10418  {
10419  break;
10420  }
10421  }
10422  }
10423 
10424  if(res == VK_SUCCESS)
10425  {
10426  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10427  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10428 
10429  res = AllocateFromBlock(
10430  pBlock,
10431  hCurrentPool,
10432  currentFrameIndex,
10433  size,
10434  alignment,
10435  allocFlagsCopy,
10436  createInfo.pUserData,
10437  suballocType,
10438  strategy,
10439  pAllocation);
10440  if(res == VK_SUCCESS)
10441  {
10442  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10443  return VK_SUCCESS;
10444  }
10445  else
10446  {
10447  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10448  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10449  }
10450  }
10451  }
10452  }
10453 
10454  // 3. Try to allocate from existing blocks with making other allocations lost.
10455  if(canMakeOtherLost)
10456  {
10457  uint32_t tryIndex = 0;
10458  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10459  {
10460  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10461  VmaAllocationRequest bestRequest = {};
10462  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10463 
10464  // 1. Search existing allocations.
10465  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10466  {
10467  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10468  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10469  {
10470  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10471  VMA_ASSERT(pCurrBlock);
10472  VmaAllocationRequest currRequest = {};
10473  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10474  currentFrameIndex,
10475  m_FrameInUseCount,
10476  m_BufferImageGranularity,
10477  size,
10478  alignment,
10479  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10480  suballocType,
10481  canMakeOtherLost,
10482  strategy,
10483  &currRequest))
10484  {
10485  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10486  if(pBestRequestBlock == VMA_NULL ||
10487  currRequestCost < bestRequestCost)
10488  {
10489  pBestRequestBlock = pCurrBlock;
10490  bestRequest = currRequest;
10491  bestRequestCost = currRequestCost;
10492 
10493  if(bestRequestCost == 0)
10494  {
10495  break;
10496  }
10497  }
10498  }
10499  }
10500  }
10501  else // WORST_FIT, FIRST_FIT
10502  {
10503  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10504  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10505  {
10506  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10507  VMA_ASSERT(pCurrBlock);
10508  VmaAllocationRequest currRequest = {};
10509  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10510  currentFrameIndex,
10511  m_FrameInUseCount,
10512  m_BufferImageGranularity,
10513  size,
10514  alignment,
10515  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10516  suballocType,
10517  canMakeOtherLost,
10518  strategy,
10519  &currRequest))
10520  {
10521  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10522  if(pBestRequestBlock == VMA_NULL ||
10523  currRequestCost < bestRequestCost ||
10524  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10525  {
10526  pBestRequestBlock = pCurrBlock;
10527  bestRequest = currRequest;
10528  bestRequestCost = currRequestCost;
10529 
10530  if(bestRequestCost == 0 ||
10531  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10532  {
10533  break;
10534  }
10535  }
10536  }
10537  }
10538  }
10539 
10540  if(pBestRequestBlock != VMA_NULL)
10541  {
10542  if(mapped)
10543  {
10544  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10545  if(res != VK_SUCCESS)
10546  {
10547  return res;
10548  }
10549  }
10550 
10551  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10552  currentFrameIndex,
10553  m_FrameInUseCount,
10554  &bestRequest))
10555  {
10556  // We no longer have an empty block.
10557  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10558  {
10559  m_HasEmptyBlock = false;
10560  }
10561  // Allocate from this pBlock.
10562  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10563  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10564  (*pAllocation)->InitBlockAllocation(
10565  hCurrentPool,
10566  pBestRequestBlock,
10567  bestRequest.offset,
10568  alignment,
10569  size,
10570  suballocType,
10571  mapped,
10572  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10573  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10574  VMA_DEBUG_LOG("    Returned from existing block");
10575  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10576  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10577  {
10578  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10579  }
10580  if(IsCorruptionDetectionEnabled())
10581  {
10582  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10583  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10584  }
10585  return VK_SUCCESS;
10586  }
10587  // else: Some allocations must have been touched while we are here. Next try.
10588  }
10589  else
10590  {
10591  // Could not find place in any of the blocks - break outer loop.
10592  break;
10593  }
10594  }
10595  /* Maximum number of tries exceeded - a very unlikely event that may happen when many
10596  other threads are simultaneously touching allocations, making it impossible to make
10597  them lost at the same time as we try to allocate. */
10598  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10599  {
10600  return VK_ERROR_TOO_MANY_OBJECTS;
10601  }
10602  }
10603 
10604  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10605 }
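// Editorial summary of the Allocate() flow above: (1) try existing blocks in
// strategy-dependent order, (2) create a new block, (3) make other allocations
// lost. As a worked example of step 2's sizing: with m_PreferredBlockSize ==
// 256 MiB and no explicit block size, the halving loop (up to
// NEW_BLOCK_SIZE_SHIFT_MAX == 3 steps) lets the first block be as small as
// 32 MiB (1/8), with later blocks growing through 64 and 128 MiB toward the
// full 256 MiB.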
10606 
10607 void VmaBlockVector::Free(
10608  VmaAllocation hAllocation)
10609 {
10610  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10611 
10612  // Scope for lock.
10613  {
10614  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10615 
10616  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10617 
10618  if(IsCorruptionDetectionEnabled())
10619  {
10620  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10621  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10622  }
10623 
10624  if(hAllocation->IsPersistentMap())
10625  {
10626  pBlock->Unmap(m_hAllocator, 1);
10627  }
10628 
10629  pBlock->m_pMetadata->Free(hAllocation);
10630  VMA_HEAVY_ASSERT(pBlock->Validate());
10631 
10632  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
10633 
10634  // pBlock became empty after this deallocation.
10635  if(pBlock->m_pMetadata->IsEmpty())
10636  {
10637  // We already have an empty block - we don't want two, so delete this one.
10638  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10639  {
10640  pBlockToDelete = pBlock;
10641  Remove(pBlock);
10642  }
10643  // We now have our first empty block.
10644  else
10645  {
10646  m_HasEmptyBlock = true;
10647  }
10648  }
10649  // pBlock didn't become empty, but we have another empty block - find and free that one.
10650  // (This is an optional heuristic.)
10651  else if(m_HasEmptyBlock)
10652  {
10653  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10654  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10655  {
10656  pBlockToDelete = pLastBlock;
10657  m_Blocks.pop_back();
10658  m_HasEmptyBlock = false;
10659  }
10660  }
10661 
10662  IncrementallySortBlocks();
10663  }
10664 
10665  // Destruction of a free block. Deferred until this point, outside of the mutex
10666  // lock, for performance reasons.
10667  if(pBlockToDelete != VMA_NULL)
10668  {
10669  VMA_DEBUG_LOG("    Deleted empty block");
10670  pBlockToDelete->Destroy(m_hAllocator);
10671  vma_delete(m_hAllocator, pBlockToDelete);
10672  }
10673 }
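// Editorial note: the net effect of Free() above is to cache at most one empty
// block (tracked by m_HasEmptyBlock) while never dropping below
// m_MinBlockCount, and to defer the actual VkDeviceMemory release until after
// m_Mutex is unlocked.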
10674 
10675 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10676 {
10677  VkDeviceSize result = 0;
10678  for(size_t i = m_Blocks.size(); i--; )
10679  {
10680  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10681  if(result >= m_PreferredBlockSize)
10682  {
10683  break;
10684  }
10685  }
10686  return result;
10687 }
10688 
10689 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10690 {
10691  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10692  {
10693  if(m_Blocks[blockIndex] == pBlock)
10694  {
10695  VmaVectorRemove(m_Blocks, blockIndex);
10696  return;
10697  }
10698  }
10699  VMA_ASSERT(0);
10700 }
10701 
10702 void VmaBlockVector::IncrementallySortBlocks()
10703 {
10704  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10705  {
10706  // Bubble sort only until first swap.
10707  for(size_t i = 1; i < m_Blocks.size(); ++i)
10708  {
10709  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10710  {
10711  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10712  return;
10713  }
10714  }
10715  }
10716 }
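// Worked example (editorial): IncrementallySortBlocks() performs a single
// bubble-sort pass that stops at the first swap, so ordering by free size
// converges over repeated calls instead of paying for a full sort each time.
//
//     sum-free-size order before: [ 8, 4, 16 ]  -> swaps 8 and 4 -> [ 4, 8, 16 ]
//     next call:                  [ 4, 8, 16 ]  -> already sorted, no swap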
10717 
10718 VkResult VmaBlockVector::AllocateFromBlock(
10719  VmaDeviceMemoryBlock* pBlock,
10720  VmaPool hCurrentPool,
10721  uint32_t currentFrameIndex,
10722  VkDeviceSize size,
10723  VkDeviceSize alignment,
10724  VmaAllocationCreateFlags allocFlags,
10725  void* pUserData,
10726  VmaSuballocationType suballocType,
10727  uint32_t strategy,
10728  VmaAllocation* pAllocation)
10729 {
10730  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
10731  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10732  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10733  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10734 
10735  VmaAllocationRequest currRequest = {};
10736  if(pBlock->m_pMetadata->CreateAllocationRequest(
10737  currentFrameIndex,
10738  m_FrameInUseCount,
10739  m_BufferImageGranularity,
10740  size,
10741  alignment,
10742  isUpperAddress,
10743  suballocType,
10744  false, // canMakeOtherLost
10745  strategy,
10746  &currRequest))
10747  {
10748  // Allocate from pCurrBlock.
10749  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
10750 
10751  if(mapped)
10752  {
10753  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
10754  if(res != VK_SUCCESS)
10755  {
10756  return res;
10757  }
10758  }
10759 
10760  // We no longer have an empty block.
10761  if(pBlock->m_pMetadata->IsEmpty())
10762  {
10763  m_HasEmptyBlock = false;
10764  }
10765 
10766  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10767  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
10768  (*pAllocation)->InitBlockAllocation(
10769  hCurrentPool,
10770  pBlock,
10771  currRequest.offset,
10772  alignment,
10773  size,
10774  suballocType,
10775  mapped,
10776  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10777  VMA_HEAVY_ASSERT(pBlock->Validate());
10778  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
10779  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10780  {
10781  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10782  }
10783  if(IsCorruptionDetectionEnabled())
10784  {
10785  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
10786  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10787  }
10788  return VK_SUCCESS;
10789  }
10790  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10791 }
10792 
10793 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10794 {
10795  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10796  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10797  allocInfo.allocationSize = blockSize;
10798  VkDeviceMemory mem = VK_NULL_HANDLE;
10799  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10800  if(res < 0)
10801  {
10802  return res;
10803  }
10804 
10805  // New VkDeviceMemory successfully created.
10806 
10807  // Create a new block object for it.
10808  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10809  pBlock->Init(
10810  m_hAllocator,
10811  m_MemoryTypeIndex,
10812  mem,
10813  allocInfo.allocationSize,
10814  m_NextBlockId++,
10815  m_Algorithm);
10816 
10817  m_Blocks.push_back(pBlock);
10818  if(pNewBlockIndex != VMA_NULL)
10819  {
10820  *pNewBlockIndex = m_Blocks.size() - 1;
10821  }
10822 
10823  return VK_SUCCESS;
10824 }
10825 
10826 #if VMA_STATS_STRING_ENABLED
10827 
10828 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
10829 {
10830  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10831 
10832  json.BeginObject();
10833 
10834  if(m_IsCustomPool)
10835  {
10836  json.WriteString("MemoryTypeIndex");
10837  json.WriteNumber(m_MemoryTypeIndex);
10838 
10839  json.WriteString("BlockSize");
10840  json.WriteNumber(m_PreferredBlockSize);
10841 
10842  json.WriteString("BlockCount");
10843  json.BeginObject(true);
10844  if(m_MinBlockCount > 0)
10845  {
10846  json.WriteString("Min");
10847  json.WriteNumber((uint64_t)m_MinBlockCount);
10848  }
10849  if(m_MaxBlockCount < SIZE_MAX)
10850  {
10851  json.WriteString("Max");
10852  json.WriteNumber((uint64_t)m_MaxBlockCount);
10853  }
10854  json.WriteString("Cur");
10855  json.WriteNumber((uint64_t)m_Blocks.size());
10856  json.EndObject();
10857 
10858  if(m_FrameInUseCount > 0)
10859  {
10860  json.WriteString("FrameInUseCount");
10861  json.WriteNumber(m_FrameInUseCount);
10862  }
10863 
10864  if(m_Algorithm != 0)
10865  {
10866  json.WriteString("Algorithm");
10867  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
10868  }
10869  }
10870  else
10871  {
10872  json.WriteString("PreferredBlockSize");
10873  json.WriteNumber(m_PreferredBlockSize);
10874  }
10875 
10876  json.WriteString("Blocks");
10877  json.BeginObject();
10878  for(size_t i = 0; i < m_Blocks.size(); ++i)
10879  {
10880  json.BeginString();
10881  json.ContinueString(m_Blocks[i]->GetId());
10882  json.EndString();
10883 
10884  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
10885  }
10886  json.EndObject();
10887 
10888  json.EndObject();
10889 }
10890 
10891 #endif // #if VMA_STATS_STRING_ENABLED
10892 
10893 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
10894  VmaAllocator hAllocator,
10895  uint32_t currentFrameIndex)
10896 {
10897  if(m_pDefragmentator == VMA_NULL)
10898  {
10899  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
10900  hAllocator,
10901  this,
10902  currentFrameIndex);
10903  }
10904 
10905  return m_pDefragmentator;
10906 }
10907 
10908 VkResult VmaBlockVector::Defragment(
10909  VmaDefragmentationStats* pDefragmentationStats,
10910  VkDeviceSize& maxBytesToMove,
10911  uint32_t& maxAllocationsToMove)
10912 {
10913  if(m_pDefragmentator == VMA_NULL)
10914  {
10915  return VK_SUCCESS;
10916  }
10917 
10918  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10919 
10920  // Defragment.
10921  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
10922 
10923  // Accumulate statistics.
10924  if(pDefragmentationStats != VMA_NULL)
10925  {
10926  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
10927  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
10928  pDefragmentationStats->bytesMoved += bytesMoved;
10929  pDefragmentationStats->allocationsMoved += allocationsMoved;
10930  VMA_ASSERT(bytesMoved <= maxBytesToMove);
10931  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
10932  maxBytesToMove -= bytesMoved;
10933  maxAllocationsToMove -= allocationsMoved;
10934  }
10935 
10936  // Free empty blocks.
10937  m_HasEmptyBlock = false;
10938  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10939  {
10940  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
10941  if(pBlock->m_pMetadata->IsEmpty())
10942  {
10943  if(m_Blocks.size() > m_MinBlockCount)
10944  {
10945  if(pDefragmentationStats != VMA_NULL)
10946  {
10947  ++pDefragmentationStats->deviceMemoryBlocksFreed;
10948  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
10949  }
10950 
10951  VmaVectorRemove(m_Blocks, blockIndex);
10952  pBlock->Destroy(m_hAllocator);
10953  vma_delete(m_hAllocator, pBlock);
10954  }
10955  else
10956  {
10957  m_HasEmptyBlock = true;
10958  }
10959  }
10960  }
10961 
10962  return result;
10963 }
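// Hedged usage sketch (application side, not library internals): this routine
// is reached through the public vmaDefragment() entry point, e.g. assuming a
// valid `allocator` and an array `allocs` of `allocCount` VmaAllocation:
//
//     VmaDefragmentationInfo defragInfo = {};
//     defragInfo.maxBytesToMove = VK_WHOLE_SIZE;      // no byte limit
//     defragInfo.maxAllocationsToMove = UINT32_MAX;   // no count limit
//     VmaDefragmentationStats stats = {};
//     VkResult res = vmaDefragment(
//         allocator, allocs, allocCount,
//         VMA_NULL, // pAllocationsChanged (optional)
//         &defragInfo, &stats);
//     // Buffers/images bound to moved allocations must then be re-created
//     // and re-bound by the application.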
10964 
10965 void VmaBlockVector::DestroyDefragmentator()
10966 {
10967  if(m_pDefragmentator != VMA_NULL)
10968  {
10969  vma_delete(m_hAllocator, m_pDefragmentator);
10970  m_pDefragmentator = VMA_NULL;
10971  }
10972 }
10973 
10974 void VmaBlockVector::MakePoolAllocationsLost(
10975  uint32_t currentFrameIndex,
10976  size_t* pLostAllocationCount)
10977 {
10978  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10979  size_t lostAllocationCount = 0;
10980  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10981  {
10982  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10983  VMA_ASSERT(pBlock);
10984  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
10985  }
10986  if(pLostAllocationCount != VMA_NULL)
10987  {
10988  *pLostAllocationCount = lostAllocationCount;
10989  }
10990 }
10991 
10992 VkResult VmaBlockVector::CheckCorruption()
10993 {
10994  if(!IsCorruptionDetectionEnabled())
10995  {
10996  return VK_ERROR_FEATURE_NOT_PRESENT;
10997  }
10998 
10999  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11000  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11001  {
11002  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11003  VMA_ASSERT(pBlock);
11004  VkResult res = pBlock->CheckCorruption(m_hAllocator);
11005  if(res != VK_SUCCESS)
11006  {
11007  return res;
11008  }
11009  }
11010  return VK_SUCCESS;
11011 }
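// Hedged usage sketch: applications trigger this check through the public
// vmaCheckCorruption() call, e.g.:
//
//     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
//     // VK_ERROR_FEATURE_NOT_PRESENT means corruption detection is not
//     // enabled for any of the requested memory types.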
11012 
11013 void VmaBlockVector::AddStats(VmaStats* pStats)
11014 {
11015  const uint32_t memTypeIndex = m_MemoryTypeIndex;
11016  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11017 
11018  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11019 
11020  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11021  {
11022  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11023  VMA_ASSERT(pBlock);
11024  VMA_HEAVY_ASSERT(pBlock->Validate());
11025  VmaStatInfo allocationStatInfo;
11026  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11027  VmaAddStatInfo(pStats->total, allocationStatInfo);
11028  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11029  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11030  }
11031 }
11032 
11033 ////////////////////////////////////////////////////////////////////////////////
11034 // VmaDefragmentator members definition
11035 
11036 VmaDefragmentator::VmaDefragmentator(
11037  VmaAllocator hAllocator,
11038  VmaBlockVector* pBlockVector,
11039  uint32_t currentFrameIndex) :
11040  m_hAllocator(hAllocator),
11041  m_pBlockVector(pBlockVector),
11042  m_CurrentFrameIndex(currentFrameIndex),
11043  m_BytesMoved(0),
11044  m_AllocationsMoved(0),
11045  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
11046  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
11047 {
11048  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
11049 }
11050 
11051 VmaDefragmentator::~VmaDefragmentator()
11052 {
11053  for(size_t i = m_Blocks.size(); i--; )
11054  {
11055  vma_delete(m_hAllocator, m_Blocks[i]);
11056  }
11057 }
11058 
11059 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11060 {
11061  AllocationInfo allocInfo;
11062  allocInfo.m_hAllocation = hAlloc;
11063  allocInfo.m_pChanged = pChanged;
11064  m_Allocations.push_back(allocInfo);
11065 }
11066 
11067 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11068 {
11069  // It has already been mapped for defragmentation.
11070  if(m_pMappedDataForDefragmentation)
11071  {
11072  *ppMappedData = m_pMappedDataForDefragmentation;
11073  return VK_SUCCESS;
11074  }
11075 
11076  // It is originally mapped.
11077  if(m_pBlock->GetMappedData())
11078  {
11079  *ppMappedData = m_pBlock->GetMappedData();
11080  return VK_SUCCESS;
11081  }
11082 
11083  // Map on first usage.
11084  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11085  *ppMappedData = m_pMappedDataForDefragmentation;
11086  return res;
11087 }
11088 
11089 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11090 {
11091  if(m_pMappedDataForDefragmentation != VMA_NULL)
11092  {
11093  m_pBlock->Unmap(hAllocator, 1);
11094  }
11095 }
11096 
11097 VkResult VmaDefragmentator::DefragmentRound(
11098  VkDeviceSize maxBytesToMove,
11099  uint32_t maxAllocationsToMove)
11100 {
11101  if(m_Blocks.empty())
11102  {
11103  return VK_SUCCESS;
11104  }
11105 
11106  size_t srcBlockIndex = m_Blocks.size() - 1;
11107  size_t srcAllocIndex = SIZE_MAX;
11108  for(;;)
11109  {
11110  // 1. Find next allocation to move.
11111  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11112  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11113  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11114  {
11115  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11116  {
11117  // Finished: no more allocations to process.
11118  if(srcBlockIndex == 0)
11119  {
11120  return VK_SUCCESS;
11121  }
11122  else
11123  {
11124  --srcBlockIndex;
11125  srcAllocIndex = SIZE_MAX;
11126  }
11127  }
11128  else
11129  {
11130  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11131  }
11132  }
11133 
11134  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11135  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11136 
11137  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11138  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11139  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11140  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11141 
11142  // 2. Try to find new place for this allocation in preceding or current block.
11143  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11144  {
11145  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11146  VmaAllocationRequest dstAllocRequest;
11147  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11148  m_CurrentFrameIndex,
11149  m_pBlockVector->GetFrameInUseCount(),
11150  m_pBlockVector->GetBufferImageGranularity(),
11151  size,
11152  alignment,
11153  false, // upperAddress
11154  suballocType,
11155  false, // canMakeOtherLost
11156  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT, // strategy
11157  &dstAllocRequest) &&
11158  MoveMakesSense(
11159  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11160  {
11161  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11162 
11163  // Reached limit on number of allocations or bytes to move.
11164  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11165  (m_BytesMoved + size > maxBytesToMove))
11166  {
11167  return VK_INCOMPLETE;
11168  }
11169 
11170  void* pDstMappedData = VMA_NULL;
11171  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11172  if(res != VK_SUCCESS)
11173  {
11174  return res;
11175  }
11176 
11177  void* pSrcMappedData = VMA_NULL;
11178  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11179  if(res != VK_SUCCESS)
11180  {
11181  return res;
11182  }
11183 
11184  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11185  memcpy(
11186  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11187  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11188  static_cast<size_t>(size));
11189 
11190  if(VMA_DEBUG_MARGIN > 0)
11191  {
11192  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11193  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11194  }
11195 
11196  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11197  dstAllocRequest,
11198  suballocType,
11199  size,
11200  false, // upperAddress
11201  allocInfo.m_hAllocation);
11202  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11203 
11204  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11205 
11206  if(allocInfo.m_pChanged != VMA_NULL)
11207  {
11208  *allocInfo.m_pChanged = VK_TRUE;
11209  }
11210 
11211  ++m_AllocationsMoved;
11212  m_BytesMoved += size;
11213 
11214  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11215 
11216  break;
11217  }
11218  }
11219 
11220  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
11221 
11222  if(srcAllocIndex > 0)
11223  {
11224  --srcAllocIndex;
11225  }
11226  else
11227  {
11228  if(srcBlockIndex > 0)
11229  {
11230  --srcBlockIndex;
11231  srcAllocIndex = SIZE_MAX;
11232  }
11233  else
11234  {
11235  return VK_SUCCESS;
11236  }
11237  }
11238  }
11239 }
11240 
11241 VkResult VmaDefragmentator::Defragment(
11242  VkDeviceSize maxBytesToMove,
11243  uint32_t maxAllocationsToMove)
11244 {
11245  if(m_Allocations.empty())
11246  {
11247  return VK_SUCCESS;
11248  }
11249 
11250  // Create block info for each block.
11251  const size_t blockCount = m_pBlockVector->m_Blocks.size();
11252  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11253  {
11254  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
11255  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
11256  m_Blocks.push_back(pBlockInfo);
11257  }
11258 
11259  // Sort them by m_pBlock pointer value.
11260  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
11261 
11262  // Move allocation infos from m_Allocations to the appropriate m_Blocks[i].m_Allocations.
11263  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
11264  {
11265  AllocationInfo& allocInfo = m_Allocations[allocIndex];
11266  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
11267  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11268  {
11269  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
11270  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
11271  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
11272  {
11273  (*it)->m_Allocations.push_back(allocInfo);
11274  }
11275  else
11276  {
11277  VMA_ASSERT(0);
11278  }
11279  }
11280  }
11281  m_Allocations.clear();
11282 
11283  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11284  {
11285  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
11286  pBlockInfo->CalcHasNonMovableAllocations();
11287  pBlockInfo->SortAllocationsBySizeDescecnding();
11288  }
11289 
11290  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
11291  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
11292 
11293  // Execute defragmentation rounds (the main part).
11294  VkResult result = VK_SUCCESS;
11295  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
11296  {
11297  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
11298  }
11299 
11300  // Unmap blocks that were mapped for defragmentation.
11301  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11302  {
11303  m_Blocks[blockIndex]->Unmap(m_hAllocator);
11304  }
11305 
11306  return result;
11307 }
11308 
11309 bool VmaDefragmentator::MoveMakesSense(
11310  size_t dstBlockIndex, VkDeviceSize dstOffset,
11311  size_t srcBlockIndex, VkDeviceSize srcOffset)
11312 {
11313  if(dstBlockIndex < srcBlockIndex)
11314  {
11315  return true;
11316  }
11317  if(dstBlockIndex > srcBlockIndex)
11318  {
11319  return false;
11320  }
11321  if(dstOffset < srcOffset)
11322  {
11323  return true;
11324  }
11325  return false;
11326 }
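// Editorial note: MoveMakesSense() is effectively a lexicographic comparison -
// a move is worthwhile only if (dstBlockIndex, dstOffset) is strictly less
// than (srcBlockIndex, srcOffset), so data only ever travels toward earlier
// blocks or lower offsets. Example: dst (block 0, offset 4096) vs
// src (block 2, offset 0) -> true; dst (block 1, offset 512) vs
// src (block 1, offset 256) -> false.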
11327 
11328 ////////////////////////////////////////////////////////////////////////////////
11329 // VmaRecorder
11330 
11331 #if VMA_RECORDING_ENABLED
11332 
11333 VmaRecorder::VmaRecorder() :
11334  m_UseMutex(true),
11335  m_Flags(0),
11336  m_File(VMA_NULL),
11337  m_Freq(INT64_MAX),
11338  m_StartCounter(INT64_MAX)
11339 {
11340 }
11341 
11342 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11343 {
11344  m_UseMutex = useMutex;
11345  m_Flags = settings.flags;
11346 
11347  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11348  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11349 
11350  // Open file for writing.
11351  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11352  if(err != 0)
11353  {
11354  return VK_ERROR_INITIALIZATION_FAILED;
11355  }
11356 
11357  // Write header.
11358  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11359  fprintf(m_File, "%s\n", "1,3");
11360 
11361  return VK_SUCCESS;
11362 }
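// Illustrative sample of the file produced above (CSV, format version 1,3);
// the thread id and timestamps are hypothetical:
//
//     Vulkan Memory Allocator,Calls recording
//     1,3
//     12552,0.002,0,vmaCreateAllocator
//     12552,3.591,42,vmaDestroyAllocator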
11363 
11364 VmaRecorder::~VmaRecorder()
11365 {
11366  if(m_File != VMA_NULL)
11367  {
11368  fclose(m_File);
11369  }
11370 }
11371 
11372 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11373 {
11374  CallParams callParams;
11375  GetBasicParams(callParams);
11376 
11377  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11378  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11379  Flush();
11380 }
11381 
11382 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11383 {
11384  CallParams callParams;
11385  GetBasicParams(callParams);
11386 
11387  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11388  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11389  Flush();
11390 }
11391 
11392 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
11393 {
11394  CallParams callParams;
11395  GetBasicParams(callParams);
11396 
11397  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11398  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
11399  createInfo.memoryTypeIndex,
11400  createInfo.flags,
11401  createInfo.blockSize,
11402  (uint64_t)createInfo.minBlockCount,
11403  (uint64_t)createInfo.maxBlockCount,
11404  createInfo.frameInUseCount,
11405  pool);
11406  Flush();
11407 }
11408 
11409 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11410 {
11411  CallParams callParams;
11412  GetBasicParams(callParams);
11413 
11414  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11415  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11416  pool);
11417  Flush();
11418 }
11419 
11420 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11421  const VkMemoryRequirements& vkMemReq,
11422  const VmaAllocationCreateInfo& createInfo,
11423  VmaAllocation allocation)
11424 {
11425  CallParams callParams;
11426  GetBasicParams(callParams);
11427 
11428  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11429  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11430  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11431  vkMemReq.size,
11432  vkMemReq.alignment,
11433  vkMemReq.memoryTypeBits,
11434  createInfo.flags,
11435  createInfo.usage,
11436  createInfo.requiredFlags,
11437  createInfo.preferredFlags,
11438  createInfo.memoryTypeBits,
11439  createInfo.pool,
11440  allocation,
11441  userDataStr.GetString());
11442  Flush();
11443 }
11444 
11445 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11446  const VkMemoryRequirements& vkMemReq,
11447  bool requiresDedicatedAllocation,
11448  bool prefersDedicatedAllocation,
11449  const VmaAllocationCreateInfo& createInfo,
11450  VmaAllocation allocation)
11451 {
11452  CallParams callParams;
11453  GetBasicParams(callParams);
11454 
11455  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11456  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11457  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11458  vkMemReq.size,
11459  vkMemReq.alignment,
11460  vkMemReq.memoryTypeBits,
11461  requiresDedicatedAllocation ? 1 : 0,
11462  prefersDedicatedAllocation ? 1 : 0,
11463  createInfo.flags,
11464  createInfo.usage,
11465  createInfo.requiredFlags,
11466  createInfo.preferredFlags,
11467  createInfo.memoryTypeBits,
11468  createInfo.pool,
11469  allocation,
11470  userDataStr.GetString());
11471  Flush();
11472 }
11473 
11474 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11475  const VkMemoryRequirements& vkMemReq,
11476  bool requiresDedicatedAllocation,
11477  bool prefersDedicatedAllocation,
11478  const VmaAllocationCreateInfo& createInfo,
11479  VmaAllocation allocation)
11480 {
11481  CallParams callParams;
11482  GetBasicParams(callParams);
11483 
11484  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11485  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11486  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11487  vkMemReq.size,
11488  vkMemReq.alignment,
11489  vkMemReq.memoryTypeBits,
11490  requiresDedicatedAllocation ? 1 : 0,
11491  prefersDedicatedAllocation ? 1 : 0,
11492  createInfo.flags,
11493  createInfo.usage,
11494  createInfo.requiredFlags,
11495  createInfo.preferredFlags,
11496  createInfo.memoryTypeBits,
11497  createInfo.pool,
11498  allocation,
11499  userDataStr.GetString());
11500  Flush();
11501 }
11502 
11503 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11504  VmaAllocation allocation)
11505 {
11506  CallParams callParams;
11507  GetBasicParams(callParams);
11508 
11509  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11510  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11511  allocation);
11512  Flush();
11513 }
11514 
11515 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11516  VmaAllocation allocation,
11517  const void* pUserData)
11518 {
11519  CallParams callParams;
11520  GetBasicParams(callParams);
11521 
11522  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11523  UserDataString userDataStr(
11524  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11525  pUserData);
11526  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11527  allocation,
11528  userDataStr.GetString());
11529  Flush();
11530 }
11531 
11532 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11533  VmaAllocation allocation)
11534 {
11535  CallParams callParams;
11536  GetBasicParams(callParams);
11537 
11538  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11539  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11540  allocation);
11541  Flush();
11542 }
11543 
11544 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11545  VmaAllocation allocation)
11546 {
11547  CallParams callParams;
11548  GetBasicParams(callParams);
11549 
11550  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11551  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11552  allocation);
11553  Flush();
11554 }
11555 
11556 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11557  VmaAllocation allocation)
11558 {
11559  CallParams callParams;
11560  GetBasicParams(callParams);
11561 
11562  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11563  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11564  allocation);
11565  Flush();
11566 }
11567 
11568 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11569  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11570 {
11571  CallParams callParams;
11572  GetBasicParams(callParams);
11573 
11574  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11575  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11576  allocation,
11577  offset,
11578  size);
11579  Flush();
11580 }
11581 
11582 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11583  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11584 {
11585  CallParams callParams;
11586  GetBasicParams(callParams);
11587 
11588  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11589  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11590  allocation,
11591  offset,
11592  size);
11593  Flush();
11594 }
11595 
11596 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11597  const VkBufferCreateInfo& bufCreateInfo,
11598  const VmaAllocationCreateInfo& allocCreateInfo,
11599  VmaAllocation allocation)
11600 {
11601  CallParams callParams;
11602  GetBasicParams(callParams);
11603 
11604  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11605  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11606  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11607  bufCreateInfo.flags,
11608  bufCreateInfo.size,
11609  bufCreateInfo.usage,
11610  bufCreateInfo.sharingMode,
11611  allocCreateInfo.flags,
11612  allocCreateInfo.usage,
11613  allocCreateInfo.requiredFlags,
11614  allocCreateInfo.preferredFlags,
11615  allocCreateInfo.memoryTypeBits,
11616  allocCreateInfo.pool,
11617  allocation,
11618  userDataStr.GetString());
11619  Flush();
11620 }
11621 
11622 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11623  const VkImageCreateInfo& imageCreateInfo,
11624  const VmaAllocationCreateInfo& allocCreateInfo,
11625  VmaAllocation allocation)
11626 {
11627  CallParams callParams;
11628  GetBasicParams(callParams);
11629 
11630  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11631  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11632  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11633  imageCreateInfo.flags,
11634  imageCreateInfo.imageType,
11635  imageCreateInfo.format,
11636  imageCreateInfo.extent.width,
11637  imageCreateInfo.extent.height,
11638  imageCreateInfo.extent.depth,
11639  imageCreateInfo.mipLevels,
11640  imageCreateInfo.arrayLayers,
11641  imageCreateInfo.samples,
11642  imageCreateInfo.tiling,
11643  imageCreateInfo.usage,
11644  imageCreateInfo.sharingMode,
11645  imageCreateInfo.initialLayout,
11646  allocCreateInfo.flags,
11647  allocCreateInfo.usage,
11648  allocCreateInfo.requiredFlags,
11649  allocCreateInfo.preferredFlags,
11650  allocCreateInfo.memoryTypeBits,
11651  allocCreateInfo.pool,
11652  allocation,
11653  userDataStr.GetString());
11654  Flush();
11655 }
11656 
11657 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11658  VmaAllocation allocation)
11659 {
11660  CallParams callParams;
11661  GetBasicParams(callParams);
11662 
11663  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11664  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11665  allocation);
11666  Flush();
11667 }
11668 
11669 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11670  VmaAllocation allocation)
11671 {
11672  CallParams callParams;
11673  GetBasicParams(callParams);
11674 
11675  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11676  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11677  allocation);
11678  Flush();
11679 }
11680 
11681 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11682  VmaAllocation allocation)
11683 {
11684  CallParams callParams;
11685  GetBasicParams(callParams);
11686 
11687  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11688  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11689  allocation);
11690  Flush();
11691 }
11692 
11693 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11694  VmaAllocation allocation)
11695 {
11696  CallParams callParams;
11697  GetBasicParams(callParams);
11698 
11699  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11700  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11701  allocation);
11702  Flush();
11703 }
11704 
11705 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11706  VmaPool pool)
11707 {
11708  CallParams callParams;
11709  GetBasicParams(callParams);
11710 
11711  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11712  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11713  pool);
11714  Flush();
11715 }
11716 
11717 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11718 {
11719  if(pUserData != VMA_NULL)
11720  {
11721  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11722  {
11723  m_Str = (const char*)pUserData;
11724  }
11725  else
11726  {
11727  sprintf_s(m_PtrStr, "%p", pUserData);
11728  m_Str = m_PtrStr;
11729  }
11730  }
11731  else
11732  {
11733  m_Str = "";
11734  }
11735 }
11736 
11737 void VmaRecorder::WriteConfiguration(
11738  const VkPhysicalDeviceProperties& devProps,
11739  const VkPhysicalDeviceMemoryProperties& memProps,
11740  bool dedicatedAllocationExtensionEnabled)
11741 {
11742  fprintf(m_File, "Config,Begin\n");
11743 
11744  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
11745  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
11746  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
11747  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
11748  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
11749  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
11750 
11751  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
11752  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
11753  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
11754 
11755  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
11756  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
11757  {
11758  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
11759  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
11760  }
11761  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
11762  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
11763  {
11764  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
11765  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
11766  }
11767 
11768  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
11769 
11770  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
11771  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
11772  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
11773  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
11774  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
11775  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
11776  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
11777  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
11778  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11779 
11780  fprintf(m_File, "Config,End\n");
11781 }
11782 
11783 void VmaRecorder::GetBasicParams(CallParams& outParams)
11784 {
11785  outParams.threadId = GetCurrentThreadId();
11786 
11787  LARGE_INTEGER counter;
11788  QueryPerformanceCounter(&counter);
11789  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11790 }
11791 
11792 void VmaRecorder::Flush()
11793 {
11794  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11795  {
11796  fflush(m_File);
11797  }
11798 }
11799 
11800 #endif // #if VMA_RECORDING_ENABLED
11801 
11802 ////////////////////////////////////////////////////////////////////////////////
11803 // VmaAllocator_T
11804 
11805 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
11806  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
11807  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
11808  m_hDevice(pCreateInfo->device),
11809  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
11810  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
11811  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
11812  m_PreferredLargeHeapBlockSize(0),
11813  m_PhysicalDevice(pCreateInfo->physicalDevice),
11814  m_CurrentFrameIndex(0),
11815  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
11816  m_NextPoolId(0)
11817 #if VMA_RECORDING_ENABLED
11818  ,m_pRecorder(VMA_NULL)
11819 #endif
11820 {
11821  if(VMA_DEBUG_DETECT_CORRUPTION)
11822  {
11823  // Needs to be a multiple of sizeof(uint32_t) because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
11824  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
11825  }
11826 
11827  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
11828 
11829 #if !(VMA_DEDICATED_ALLOCATION)
11830  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
11831  {
11832  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
11833  }
11834 #endif
11835 
11836  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
11837  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
11838  memset(&m_MemProps, 0, sizeof(m_MemProps));
11839 
11840  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
11841  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
11842 
11843  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
11844  {
11845  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
11846  }
11847 
11848  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
11849  {
11850  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
11851  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
11852  }
11853 
11854  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
11855 
11856  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
11857  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
11858 
11859  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
11860  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
11861  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
11862  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
11863 
11864  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
11865  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11866 
11867  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
11868  {
11869  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
11870  {
11871  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
11872  if(limit != VK_WHOLE_SIZE)
11873  {
11874  m_HeapSizeLimit[heapIndex] = limit;
11875  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
11876  {
11877  m_MemProps.memoryHeaps[heapIndex].size = limit;
11878  }
11879  }
11880  }
11881  }
11882 
11883  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11884  {
11885  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
11886 
11887  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
11888  this,
11889  memTypeIndex,
11890  preferredBlockSize,
11891  0,
11892  SIZE_MAX,
11893  GetBufferImageGranularity(),
11894  pCreateInfo->frameInUseCount,
11895  false, // isCustomPool
11896  false, // explicitBlockSize
11897  false); // linearAlgorithm
11898  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
11899  // because minBlockCount is 0.
11900  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
11901 
11902  }
11903 }
11904 
11905 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
11906 {
11907  VkResult res = VK_SUCCESS;
11908 
11909  if(pCreateInfo->pRecordSettings != VMA_NULL &&
11910  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
11911  {
11912 #if VMA_RECORDING_ENABLED
11913  m_pRecorder = vma_new(this, VmaRecorder)();
11914  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
11915  if(res != VK_SUCCESS)
11916  {
11917  return res;
11918  }
11919  m_pRecorder->WriteConfiguration(
11920  m_PhysicalDeviceProperties,
11921  m_MemProps,
11922  m_UseKhrDedicatedAllocation);
11923  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
11924 #else
11925  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
11926  return VK_ERROR_FEATURE_NOT_PRESENT;
11927 #endif
11928  }
11929 
11930  return res;
11931 }
11932 
11933 VmaAllocator_T::~VmaAllocator_T()
11934 {
11935 #if VMA_RECORDING_ENABLED
11936  if(m_pRecorder != VMA_NULL)
11937  {
11938  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
11939  vma_delete(this, m_pRecorder);
11940  }
11941 #endif
11942 
11943  VMA_ASSERT(m_Pools.empty());
11944 
11945  for(size_t i = GetMemoryTypeCount(); i--; )
11946  {
11947  vma_delete(this, m_pDedicatedAllocations[i]);
11948  vma_delete(this, m_pBlockVectors[i]);
11949  }
11950 }
11951 
11952 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
11953 {
11954 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
11955  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
11956  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
11957  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
11958  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
11959  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
11960  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
11961  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
11962  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
11963  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
11964  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
11965  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
11966  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
11967  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
11968  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
11969  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
11970  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
11971 #if VMA_DEDICATED_ALLOCATION
11972  if(m_UseKhrDedicatedAllocation)
11973  {
11974  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
11975  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
11976  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
11977  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
11978  }
11979 #endif // #if VMA_DEDICATED_ALLOCATION
11980 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
11981 
11982 #define VMA_COPY_IF_NOT_NULL(funcName) \
11983  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
11984 
11985  if(pVulkanFunctions != VMA_NULL)
11986  {
11987  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
11988  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
11989  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
11990  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
11991  VMA_COPY_IF_NOT_NULL(vkMapMemory);
11992  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
11993  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
11994  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
11995  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
11996  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
11997  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
11998  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
11999  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
12000  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
12001  VMA_COPY_IF_NOT_NULL(vkCreateImage);
12002  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
12003 #if VMA_DEDICATED_ALLOCATION
12004  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
12005  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
12006 #endif
12007  }
12008 
12009 #undef VMA_COPY_IF_NOT_NULL
12010 
12011  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
12012  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
12013  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
12014  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
12015  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
12016  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
12017  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
12018  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
12019  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
12020  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
12021  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
12022  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
12023  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
12024  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
12025  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
12026  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
12027  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
12028  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
12029 #if VMA_DEDICATED_ALLOCATION
12030  if(m_UseKhrDedicatedAllocation)
12031  {
12032  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
12033  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
12034  }
12035 #endif
12036 }
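// [Editorial example - a minimal sketch, not part of the library source.]
// When VMA_STATIC_VULKAN_FUNCTIONS is defined to 0, the pointers copied by the
// function above must come from VmaAllocatorCreateInfo::pVulkanFunctions.
// Handles `physicalDevice` and `device` are assumed to exist.
/*
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
vulkanFunctions.vkFreeMemory = &vkFreeMemory;
vulkanFunctions.vkMapMemory = &vkMapMemory;
vulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
vulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
vulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
vulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
vulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
vulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
vulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
vulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
vulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
vulkanFunctions.vkCreateImage = &vkCreateImage;
vulkanFunctions.vkDestroyImage = &vkDestroyImage;

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/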
12037 
12038 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12039 {
12040  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12041  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12042  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12043  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12044 }
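// [Editorial note - worked example of the heuristic above, assuming the
// default values of VMA_SMALL_HEAP_MAX_SIZE (1 GiB) and
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 MiB).]
// A 256 MiB heap counts as "small", so its preferred block size becomes
// 256 MiB / 8 = 32 MiB; an 8 GiB heap gets the full 256 MiB default.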
12045 
12046 VkResult VmaAllocator_T::AllocateMemoryOfType(
12047  VkDeviceSize size,
12048  VkDeviceSize alignment,
12049  bool dedicatedAllocation,
12050  VkBuffer dedicatedBuffer,
12051  VkImage dedicatedImage,
12052  const VmaAllocationCreateInfo& createInfo,
12053  uint32_t memTypeIndex,
12054  VmaSuballocationType suballocType,
12055  VmaAllocation* pAllocation)
12056 {
12057  VMA_ASSERT(pAllocation != VMA_NULL);
12058  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
12059 
12060  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12061 
12062  // If memory type is not HOST_VISIBLE, disable MAPPED.
12063  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12064  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12065  {
12066  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12067  }
12068 
12069  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12070  VMA_ASSERT(blockVector);
12071 
12072  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12073  bool preferDedicatedMemory =
12074  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12075  dedicatedAllocation ||
12076  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
12077  size > preferredBlockSize / 2;
12078 
12079  if(preferDedicatedMemory &&
12080  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12081  finalCreateInfo.pool == VK_NULL_HANDLE)
12082  {
12083  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
12084  }
12085 
12086  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12087  {
12088  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12089  {
12090  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12091  }
12092  else
12093  {
12094  return AllocateDedicatedMemory(
12095  size,
12096  suballocType,
12097  memTypeIndex,
12098  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12099  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12100  finalCreateInfo.pUserData,
12101  dedicatedBuffer,
12102  dedicatedImage,
12103  pAllocation);
12104  }
12105  }
12106  else
12107  {
12108  VkResult res = blockVector->Allocate(
12109  VK_NULL_HANDLE, // hCurrentPool
12110  m_CurrentFrameIndex.load(),
12111  size,
12112  alignment,
12113  finalCreateInfo,
12114  suballocType,
12115  pAllocation);
12116  if(res == VK_SUCCESS)
12117  {
12118  return res;
12119  }
12120 
12121  // Try dedicated memory.
12122  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12123  {
12124  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12125  }
12126  else
12127  {
12128  res = AllocateDedicatedMemory(
12129  size,
12130  suballocType,
12131  memTypeIndex,
12132  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12133  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12134  finalCreateInfo.pUserData,
12135  dedicatedBuffer,
12136  dedicatedImage,
12137  pAllocation);
12138  if(res == VK_SUCCESS)
12139  {
12140  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
12141  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12142  return VK_SUCCESS;
12143  }
12144  else
12145  {
12146  // Everything failed: Return error code.
12147  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12148  return res;
12149  }
12150  }
12151  }
12152 }
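// [Editorial example - a sketch of how the flags tested above are driven from
// the public API; `allocator` and a filled VkBufferCreateInfo `bufferCreateInfo`
// are assumptions. Not part of the library source.]
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
// Force the dedicated path, e.g. for a large render target:
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufferCreateInfo, &allocCreateInfo,
    &buffer, &allocation, nullptr);
*/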
12153 
12154 VkResult VmaAllocator_T::AllocateDedicatedMemory(
12155  VkDeviceSize size,
12156  VmaSuballocationType suballocType,
12157  uint32_t memTypeIndex,
12158  bool map,
12159  bool isUserDataString,
12160  void* pUserData,
12161  VkBuffer dedicatedBuffer,
12162  VkImage dedicatedImage,
12163  VmaAllocation* pAllocation)
12164 {
12165  VMA_ASSERT(pAllocation);
12166 
12167  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12168  allocInfo.memoryTypeIndex = memTypeIndex;
12169  allocInfo.allocationSize = size;
12170 
12171 #if VMA_DEDICATED_ALLOCATION
12172  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
12173  if(m_UseKhrDedicatedAllocation)
12174  {
12175  if(dedicatedBuffer != VK_NULL_HANDLE)
12176  {
12177  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
12178  dedicatedAllocInfo.buffer = dedicatedBuffer;
12179  allocInfo.pNext = &dedicatedAllocInfo;
12180  }
12181  else if(dedicatedImage != VK_NULL_HANDLE)
12182  {
12183  dedicatedAllocInfo.image = dedicatedImage;
12184  allocInfo.pNext = &dedicatedAllocInfo;
12185  }
12186  }
12187 #endif // #if VMA_DEDICATED_ALLOCATION
12188 
12189  // Allocate VkDeviceMemory.
12190  VkDeviceMemory hMemory = VK_NULL_HANDLE;
12191  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
12192  if(res < 0)
12193  {
12194  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12195  return res;
12196  }
12197 
12198  void* pMappedData = VMA_NULL;
12199  if(map)
12200  {
12201  res = (*m_VulkanFunctions.vkMapMemory)(
12202  m_hDevice,
12203  hMemory,
12204  0,
12205  VK_WHOLE_SIZE,
12206  0,
12207  &pMappedData);
12208  if(res < 0)
12209  {
12210  VMA_DEBUG_LOG(" vkMapMemory FAILED");
12211  FreeVulkanMemory(memTypeIndex, size, hMemory);
12212  return res;
12213  }
12214  }
12215 
12216  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
12217  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
12218  (*pAllocation)->SetUserData(this, pUserData);
12219  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12220  {
12221  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12222  }
12223 
12224  // Register it in m_pDedicatedAllocations.
12225  {
12226  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12227  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12228  VMA_ASSERT(pDedicatedAllocations);
12229  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
12230  }
12231 
12232  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
12233 
12234  return VK_SUCCESS;
12235 }
12236 
12237 void VmaAllocator_T::GetBufferMemoryRequirements(
12238  VkBuffer hBuffer,
12239  VkMemoryRequirements& memReq,
12240  bool& requiresDedicatedAllocation,
12241  bool& prefersDedicatedAllocation) const
12242 {
12243 #if VMA_DEDICATED_ALLOCATION
12244  if(m_UseKhrDedicatedAllocation)
12245  {
12246  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
12247  memReqInfo.buffer = hBuffer;
12248 
12249  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12250 
12251  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12252  memReq2.pNext = &memDedicatedReq;
12253 
12254  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12255 
12256  memReq = memReq2.memoryRequirements;
12257  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12258  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12259  }
12260  else
12261 #endif // #if VMA_DEDICATED_ALLOCATION
12262  {
12263  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
12264  requiresDedicatedAllocation = false;
12265  prefersDedicatedAllocation = false;
12266  }
12267 }
12268 
12269 void VmaAllocator_T::GetImageMemoryRequirements(
12270  VkImage hImage,
12271  VkMemoryRequirements& memReq,
12272  bool& requiresDedicatedAllocation,
12273  bool& prefersDedicatedAllocation) const
12274 {
12275 #if VMA_DEDICATED_ALLOCATION
12276  if(m_UseKhrDedicatedAllocation)
12277  {
12278  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
12279  memReqInfo.image = hImage;
12280 
12281  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12282 
12283  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12284  memReq2.pNext = &memDedicatedReq;
12285 
12286  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12287 
12288  memReq = memReq2.memoryRequirements;
12289  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12290  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12291  }
12292  else
12293 #endif // #if VMA_DEDICATED_ALLOCATION
12294  {
12295  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
12296  requiresDedicatedAllocation = false;
12297  prefersDedicatedAllocation = false;
12298  }
12299 }
12300 
12301 VkResult VmaAllocator_T::AllocateMemory(
12302  const VkMemoryRequirements& vkMemReq,
12303  bool requiresDedicatedAllocation,
12304  bool prefersDedicatedAllocation,
12305  VkBuffer dedicatedBuffer,
12306  VkImage dedicatedImage,
12307  const VmaAllocationCreateInfo& createInfo,
12308  VmaSuballocationType suballocType,
12309  VmaAllocation* pAllocation)
12310 {
12311  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12312 
12313  if(vkMemReq.size == 0)
12314  {
12315  return VK_ERROR_VALIDATION_FAILED_EXT;
12316  }
12317  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12318  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12319  {
12320  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12321  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12322  }
12323  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12324  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
12325  {
12326  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12327  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12328  }
12329  if(requiresDedicatedAllocation)
12330  {
12331  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12332  {
12333  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12334  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12335  }
12336  if(createInfo.pool != VK_NULL_HANDLE)
12337  {
12338  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12339  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12340  }
12341  }
12342  if((createInfo.pool != VK_NULL_HANDLE) &&
12343  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12344  {
12345  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12346  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12347  }
12348 
12349  if(createInfo.pool != VK_NULL_HANDLE)
12350  {
12351  const VkDeviceSize alignmentForPool = VMA_MAX(
12352  vkMemReq.alignment,
12353  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12354  return createInfo.pool->m_BlockVector.Allocate(
12355  createInfo.pool,
12356  m_CurrentFrameIndex.load(),
12357  vkMemReq.size,
12358  alignmentForPool,
12359  createInfo,
12360  suballocType,
12361  pAllocation);
12362  }
12363  else
12364  {
12365  // Bit mask of Vulkan memory types acceptable for this allocation.
12366  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12367  uint32_t memTypeIndex = UINT32_MAX;
12368  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12369  if(res == VK_SUCCESS)
12370  {
12371  VkDeviceSize alignmentForMemType = VMA_MAX(
12372  vkMemReq.alignment,
12373  GetMemoryTypeMinAlignment(memTypeIndex));
12374 
12375  res = AllocateMemoryOfType(
12376  vkMemReq.size,
12377  alignmentForMemType,
12378  requiresDedicatedAllocation || prefersDedicatedAllocation,
12379  dedicatedBuffer,
12380  dedicatedImage,
12381  createInfo,
12382  memTypeIndex,
12383  suballocType,
12384  pAllocation);
12385  // Succeeded on first try.
12386  if(res == VK_SUCCESS)
12387  {
12388  return res;
12389  }
12390  // Allocation from this memory type failed. Try other compatible memory types.
12391  else
12392  {
12393  for(;;)
12394  {
12395  // Remove old memTypeIndex from list of possibilities.
12396  memoryTypeBits &= ~(1u << memTypeIndex);
12397  // Find alternative memTypeIndex.
12398  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12399  if(res == VK_SUCCESS)
12400  {
12401  alignmentForMemType = VMA_MAX(
12402  vkMemReq.alignment,
12403  GetMemoryTypeMinAlignment(memTypeIndex));
12404 
12405  res = AllocateMemoryOfType(
12406  vkMemReq.size,
12407  alignmentForMemType,
12408  requiresDedicatedAllocation || prefersDedicatedAllocation,
12409  dedicatedBuffer,
12410  dedicatedImage,
12411  createInfo,
12412  memTypeIndex,
12413  suballocType,
12414  pAllocation);
12415  // Allocation from this alternative memory type succeeded.
12416  if(res == VK_SUCCESS)
12417  {
12418  return res;
12419  }
12420  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12421  }
12422  // No other matching memory type index could be found.
12423  else
12424  {
12425  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12426  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12427  }
12428  }
12429  }
12430  }
12431  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12432  else
12433  return res;
12434  }
12435 }
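// [Editorial example - the fallback over memoryTypeBits implemented above can
// be probed up front through the public API. Handles `device`, `buffer`, and
// `allocator` are assumptions; a sketch, not part of the library source.]
/*
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndex(allocator, memReq.memoryTypeBits,
    &allocCreateInfo, &memTypeIndex);
// res == VK_ERROR_FEATURE_NOT_PRESENT means no memory type matches at all.
*/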
12436 
12437 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
12438 {
12439  VMA_ASSERT(allocation);
12440 
12441  if(TouchAllocation(allocation))
12442  {
12443  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12444  {
12445  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12446  }
12447 
12448  switch(allocation->GetType())
12449  {
12450  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12451  {
12452  VmaBlockVector* pBlockVector = VMA_NULL;
12453  VmaPool hPool = allocation->GetPool();
12454  if(hPool != VK_NULL_HANDLE)
12455  {
12456  pBlockVector = &hPool->m_BlockVector;
12457  }
12458  else
12459  {
12460  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12461  pBlockVector = m_pBlockVectors[memTypeIndex];
12462  }
12463  pBlockVector->Free(allocation);
12464  }
12465  break;
12466  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12467  FreeDedicatedMemory(allocation);
12468  break;
12469  default:
12470  VMA_ASSERT(0);
12471  }
12472  }
12473 
12474  allocation->SetUserData(this, VMA_NULL);
12475  vma_delete(this, allocation);
12476 }
12477 
12478 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12479 {
12480  // Initialize.
12481  InitStatInfo(pStats->total);
12482  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12483  InitStatInfo(pStats->memoryType[i]);
12484  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12485  InitStatInfo(pStats->memoryHeap[i]);
12486 
12487  // Process default pools.
12488  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12489  {
12490  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12491  VMA_ASSERT(pBlockVector);
12492  pBlockVector->AddStats(pStats);
12493  }
12494 
12495  // Process custom pools.
12496  {
12497  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12498  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12499  {
12500  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12501  }
12502  }
12503 
12504  // Process dedicated allocations.
12505  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12506  {
12507  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12508  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12509  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12510  VMA_ASSERT(pDedicatedAllocVector);
12511  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12512  {
12513  VmaStatInfo allocationStatInfo;
12514  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12515  VmaAddStatInfo(pStats->total, allocationStatInfo);
12516  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12517  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12518  }
12519  }
12520 
12521  // Postprocess.
12522  VmaPostprocessCalcStatInfo(pStats->total);
12523  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12524  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12525  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12526  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12527 }
12528 
12529 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12530 
12531 VkResult VmaAllocator_T::Defragment(
12532  VmaAllocation* pAllocations,
12533  size_t allocationCount,
12534  VkBool32* pAllocationsChanged,
12535  const VmaDefragmentationInfo* pDefragmentationInfo,
12536  VmaDefragmentationStats* pDefragmentationStats)
12537 {
12538  if(pAllocationsChanged != VMA_NULL)
12539  {
12540  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
12541  }
12542  if(pDefragmentationStats != VMA_NULL)
12543  {
12544  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12545  }
12546 
12547  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12548 
12549  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12550 
12551  const size_t poolCount = m_Pools.size();
12552 
12553  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12554  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12555  {
12556  VmaAllocation hAlloc = pAllocations[allocIndex];
12557  VMA_ASSERT(hAlloc);
12558  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12559  // DedicatedAlloc cannot be defragmented.
12560  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12561  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12562  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12563  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12564  // Lost allocation cannot be defragmented.
12565  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12566  {
12567  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12568 
12569  const VmaPool hAllocPool = hAlloc->GetPool();
12570  // This allocation belongs to custom pool.
12571  if(hAllocPool != VK_NULL_HANDLE)
12572  {
12573  // Pools with linear or buddy algorithm are not defragmented.
12574  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12575  {
12576  pAllocBlockVector = &hAllocPool->m_BlockVector;
12577  }
12578  }
12579  // This allocation belongs to general pool.
12580  else
12581  {
12582  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12583  }
12584 
12585  if(pAllocBlockVector != VMA_NULL)
12586  {
12587  VmaDefragmentator* const pDefragmentator =
12588  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12589  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12590  &pAllocationsChanged[allocIndex] : VMA_NULL;
12591  pDefragmentator->AddAllocation(hAlloc, pChanged);
12592  }
12593  }
12594  }
12595 
12596  VkResult result = VK_SUCCESS;
12597 
12598  // ======== Main processing.
12599 
12600  VkDeviceSize maxBytesToMove = SIZE_MAX;
12601  uint32_t maxAllocationsToMove = UINT32_MAX;
12602  if(pDefragmentationInfo != VMA_NULL)
12603  {
12604  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12605  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12606  }
12607 
12608  // Process standard memory.
12609  for(uint32_t memTypeIndex = 0;
12610  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12611  ++memTypeIndex)
12612  {
12613  // Only HOST_VISIBLE memory types can be defragmented.
12614  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12615  {
12616  result = m_pBlockVectors[memTypeIndex]->Defragment(
12617  pDefragmentationStats,
12618  maxBytesToMove,
12619  maxAllocationsToMove);
12620  }
12621  }
12622 
12623  // Process custom pools.
12624  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12625  {
12626  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12627  pDefragmentationStats,
12628  maxBytesToMove,
12629  maxAllocationsToMove);
12630  }
12631 
12632  // ======== Destroy defragmentators.
12633 
12634  // Process custom pools.
12635  for(size_t poolIndex = poolCount; poolIndex--; )
12636  {
12637  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12638  }
12639 
12640  // Process standard memory.
12641  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12642  {
12643  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12644  {
12645  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12646  }
12647  }
12648 
12649  return result;
12650 }
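// [Editorial example - a typical call into this routine through the public
// vmaDefragment(), limiting the work done in one call. `allocations` is an
// assumed std::vector<VmaAllocation> of HOST_VISIBLE + HOST_COHERENT block
// allocations collected by the application; a sketch, not library source.]
/*
std::vector<VkBool32> changed(allocations.size());

VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = 64ull * 1024 * 1024; // move at most 64 MiB
defragInfo.maxAllocationsToMove = 128;

VmaDefragmentationStats stats = {};
VkResult res = vmaDefragment(allocator, allocations.data(), allocations.size(),
    changed.data(), &defragInfo, &stats);
// Allocations with changed[i] == VK_TRUE got a new memory/offset; buffers and
// images bound to them must be destroyed, recreated, and rebound.
*/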
12651 
12652 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
12653 {
12654  if(hAllocation->CanBecomeLost())
12655  {
12656  /*
12657  Warning: This is a carefully designed algorithm.
12658  Do not modify unless you really know what you're doing :)
12659  */
12660  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12661  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12662  for(;;)
12663  {
12664  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12665  {
12666  pAllocationInfo->memoryType = UINT32_MAX;
12667  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
12668  pAllocationInfo->offset = 0;
12669  pAllocationInfo->size = hAllocation->GetSize();
12670  pAllocationInfo->pMappedData = VMA_NULL;
12671  pAllocationInfo->pUserData = hAllocation->GetUserData();
12672  return;
12673  }
12674  else if(localLastUseFrameIndex == localCurrFrameIndex)
12675  {
12676  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12677  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12678  pAllocationInfo->offset = hAllocation->GetOffset();
12679  pAllocationInfo->size = hAllocation->GetSize();
12680  pAllocationInfo->pMappedData = VMA_NULL;
12681  pAllocationInfo->pUserData = hAllocation->GetUserData();
12682  return;
12683  }
12684  else // Last use time earlier than current time.
12685  {
12686  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12687  {
12688  localLastUseFrameIndex = localCurrFrameIndex;
12689  }
12690  }
12691  }
12692  }
12693  else
12694  {
12695 #if VMA_STATS_STRING_ENABLED
12696  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12697  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12698  for(;;)
12699  {
12700  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12701  if(localLastUseFrameIndex == localCurrFrameIndex)
12702  {
12703  break;
12704  }
12705  else // Last use time earlier than current time.
12706  {
12707  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12708  {
12709  localLastUseFrameIndex = localCurrFrameIndex;
12710  }
12711  }
12712  }
12713 #endif
12714 
12715  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12716  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12717  pAllocationInfo->offset = hAllocation->GetOffset();
12718  pAllocationInfo->size = hAllocation->GetSize();
12719  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
12720  pAllocationInfo->pUserData = hAllocation->GetUserData();
12721  }
12722 }
12723 
12724 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
12725 {
12726  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
12727  if(hAllocation->CanBecomeLost())
12728  {
12729  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12730  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12731  for(;;)
12732  {
12733  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12734  {
12735  return false;
12736  }
12737  else if(localLastUseFrameIndex == localCurrFrameIndex)
12738  {
12739  return true;
12740  }
12741  else // Last use time earlier than current time.
12742  {
12743  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12744  {
12745  localLastUseFrameIndex = localCurrFrameIndex;
12746  }
12747  }
12748  }
12749  }
12750  else
12751  {
12752 #if VMA_STATS_STRING_ENABLED
12753  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12754  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12755  for(;;)
12756  {
12757  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12758  if(localLastUseFrameIndex == localCurrFrameIndex)
12759  {
12760  break;
12761  }
12762  else // Last use time earlier than current time.
12763  {
12764  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12765  {
12766  localLastUseFrameIndex = localCurrFrameIndex;
12767  }
12768  }
12769  }
12770 #endif
12771 
12772  return true;
12773  }
12774 }
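// [Editorial example - the frame-index CAS loop above backs the public
// vmaTouchAllocation(). A sketch for an allocation created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT; `allocator`, `allocation`, and
// `frameIndex` are assumptions.]
/*
vmaSetCurrentFrameIndex(allocator, frameIndex); // once per frame

if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
{
    // Allocation became lost - recreate the resource before using it.
}
*/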
12775 
12776 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
12777 {
12778  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
12779 
12780  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
12781 
12782  if(newCreateInfo.maxBlockCount == 0)
12783  {
12784  newCreateInfo.maxBlockCount = SIZE_MAX;
12785  }
12786  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
12787  {
12788  return VK_ERROR_INITIALIZATION_FAILED;
12789  }
12790 
12791  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
12792 
12793  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
12794 
12795  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
12796  if(res != VK_SUCCESS)
12797  {
12798  vma_delete(this, *pPool);
12799  *pPool = VMA_NULL;
12800  return res;
12801  }
12802 
12803  // Add to m_Pools.
12804  {
12805  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12806  (*pPool)->SetId(m_NextPoolId++);
12807  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
12808  }
12809 
12810  return VK_SUCCESS;
12811 }
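// [Editorial example - creating a custom pool through the public API;
// `memTypeIndex` is assumed to come from an earlier vmaFindMemoryTypeIndex()
// call. A sketch, not part of the library source.]
/*
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB blocks
poolCreateInfo.minBlockCount = 1;
poolCreateInfo.maxBlockCount = 4; // 0 would mean SIZE_MAX, as handled above

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate from the pool via VmaAllocationCreateInfo::pool ...
vmaDestroyPool(allocator, pool);
*/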
12812 
12813 void VmaAllocator_T::DestroyPool(VmaPool pool)
12814 {
12815  // Remove from m_Pools.
12816  {
12817  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12818  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
12819  VMA_ASSERT(success && "Pool not found in Allocator.");
12820  }
12821 
12822  vma_delete(this, pool);
12823 }
12824 
12825 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
12826 {
12827  pool->m_BlockVector.GetPoolStats(pPoolStats);
12828 }
12829 
12830 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
12831 {
12832  m_CurrentFrameIndex.store(frameIndex);
12833 }
12834 
12835 void VmaAllocator_T::MakePoolAllocationsLost(
12836  VmaPool hPool,
12837  size_t* pLostAllocationCount)
12838 {
12839  hPool->m_BlockVector.MakePoolAllocationsLost(
12840  m_CurrentFrameIndex.load(),
12841  pLostAllocationCount);
12842 }
12843 
12844 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
12845 {
12846  return hPool->m_BlockVector.CheckCorruption();
12847 }
12848 
12849 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
12850 {
12851  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
12852 
12853  // Process default pools.
12854  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12855  {
12856  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
12857  {
12858  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12859  VMA_ASSERT(pBlockVector);
12860  VkResult localRes = pBlockVector->CheckCorruption();
12861  switch(localRes)
12862  {
12863  case VK_ERROR_FEATURE_NOT_PRESENT:
12864  break;
12865  case VK_SUCCESS:
12866  finalRes = VK_SUCCESS;
12867  break;
12868  default:
12869  return localRes;
12870  }
12871  }
12872  }
12873 
12874  // Process custom pools.
12875  {
12876  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12877  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12878  {
12879  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
12880  {
12881  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
12882  switch(localRes)
12883  {
12884  case VK_ERROR_FEATURE_NOT_PRESENT:
12885  break;
12886  case VK_SUCCESS:
12887  finalRes = VK_SUCCESS;
12888  break;
12889  default:
12890  return localRes;
12891  }
12892  }
12893  }
12894  }
12895 
12896  return finalRes;
12897 }
12898 
12899 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
12900 {
12901  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
12902  (*pAllocation)->InitLost();
12903 }
12904 
12905 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
12906 {
12907  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
12908 
12909  VkResult res;
12910  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
12911  {
12912  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
12913  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
12914  {
12915  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
12916  if(res == VK_SUCCESS)
12917  {
12918  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
12919  }
12920  }
12921  else
12922  {
12923  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
12924  }
12925  }
12926  else
12927  {
12928  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
12929  }
12930 
12931  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
12932  {
12933  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
12934  }
12935 
12936  return res;
12937 }
12938 
12939 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
12940 {
12941  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
12942  {
12943  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
12944  }
12945 
12946  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
12947 
12948  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
12949  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
12950  {
12951  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
12952  m_HeapSizeLimit[heapIndex] += size;
12953  }
12954 }
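// [Editorial example - the m_HeapSizeLimit bookkeeping above is enabled by
// VmaAllocatorCreateInfo::pHeapSizeLimit, e.g. to cap heap 0 at 1 GiB.
// A sketch; the other VmaAllocatorCreateInfo members are assumed filled.]
/*
VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
{
    heapSizeLimit[i] = VK_WHOLE_SIZE; // no limit
}
heapSizeLimit[0] = 1024ull * 1024 * 1024;

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.pHeapSizeLimit = heapSizeLimit;
*/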
12955 
12956 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
12957 {
12958  if(hAllocation->CanBecomeLost())
12959  {
12960  return VK_ERROR_MEMORY_MAP_FAILED;
12961  }
12962 
12963  switch(hAllocation->GetType())
12964  {
12965  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12966  {
12967  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12968  char *pBytes = VMA_NULL;
12969  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
12970  if(res == VK_SUCCESS)
12971  {
12972  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
12973  hAllocation->BlockAllocMap();
12974  }
12975  return res;
12976  }
12977  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12978  return hAllocation->DedicatedAllocMap(this, ppData);
12979  default:
12980  VMA_ASSERT(0);
12981  return VK_ERROR_MEMORY_MAP_FAILED;
12982  }
12983 }
12984 
12985 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
12986 {
12987  switch(hAllocation->GetType())
12988  {
12989  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12990  {
12991  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12992  hAllocation->BlockAllocUnmap();
12993  pBlock->Unmap(this, 1);
12994  }
12995  break;
12996  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12997  hAllocation->DedicatedAllocUnmap(this);
12998  break;
12999  default:
13000  VMA_ASSERT(0);
13001  }
13002 }
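// [Editorial example - Map()/Unmap() above are reference-counted per block, so
// the public calls must be balanced. `srcData` and `srcSize` are assumptions;
// a sketch, not part of the library source.]
/*
void* pData;
VkResult res = vmaMapMemory(allocator, allocation, &pData);
if(res == VK_SUCCESS)
{
    memcpy(pData, srcData, srcSize);
    vmaUnmapMemory(allocator, allocation);
}
*/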
13003 
13004 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
13005 {
13006  VkResult res = VK_SUCCESS;
13007  switch(hAllocation->GetType())
13008  {
13009  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13010  res = GetVulkanFunctions().vkBindBufferMemory(
13011  m_hDevice,
13012  hBuffer,
13013  hAllocation->GetMemory(),
13014  0); //memoryOffset
13015  break;
13016  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13017  {
13018  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13019  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
13020  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
13021  break;
13022  }
13023  default:
13024  VMA_ASSERT(0);
13025  }
13026  return res;
13027 }
13028 
13029 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
13030 {
13031  VkResult res = VK_SUCCESS;
13032  switch(hAllocation->GetType())
13033  {
13034  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13035  res = GetVulkanFunctions().vkBindImageMemory(
13036  m_hDevice,
13037  hImage,
13038  hAllocation->GetMemory(),
13039  0); //memoryOffset
13040  break;
13041  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13042  {
13043  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13044  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13045  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13046  break;
13047  }
13048  default:
13049  VMA_ASSERT(0);
13050  }
13051  return res;
13052 }
13053 
13054 void VmaAllocator_T::FlushOrInvalidateAllocation(
13055  VmaAllocation hAllocation,
13056  VkDeviceSize offset, VkDeviceSize size,
13057  VMA_CACHE_OPERATION op)
13058 {
13059  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
13060  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
13061  {
13062  const VkDeviceSize allocationSize = hAllocation->GetSize();
13063  VMA_ASSERT(offset <= allocationSize);
13064 
13065  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13066 
13067  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13068  memRange.memory = hAllocation->GetMemory();
13069 
13070  switch(hAllocation->GetType())
13071  {
13072  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13073  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13074  if(size == VK_WHOLE_SIZE)
13075  {
13076  memRange.size = allocationSize - memRange.offset;
13077  }
13078  else
13079  {
13080  VMA_ASSERT(offset + size <= allocationSize);
13081  memRange.size = VMA_MIN(
13082  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
13083  allocationSize - memRange.offset);
13084  }
13085  break;
13086 
13087  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13088  {
13089  // 1. Still within this allocation.
13090  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13091  if(size == VK_WHOLE_SIZE)
13092  {
13093  size = allocationSize - offset;
13094  }
13095  else
13096  {
13097  VMA_ASSERT(offset + size <= allocationSize);
13098  }
13099  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
13100 
13101  // 2. Adjust to whole block.
13102  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
13103  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
13104  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
13105  memRange.offset += allocationOffset;
13106  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
13107 
13108  break;
13109  }
13110 
13111  default:
13112  VMA_ASSERT(0);
13113  }
13114 
13115  switch(op)
13116  {
13117  case VMA_CACHE_FLUSH:
13118  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
13119  break;
13120  case VMA_CACHE_INVALIDATE:
13121  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
13122  break;
13123  default:
13124  VMA_ASSERT(0);
13125  }
13126  }
13127  // else: Just ignore this call.
13128 }
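// [Editorial example - after writing to a mapped allocation in a HOST_VISIBLE
// but non-HOST_COHERENT memory type, the written range should be flushed; the
// rounding to nonCoherentAtomSize shown above happens internally. Names
// `pMappedData`, `srcData`, offsets and sizes are assumptions.]
/*
memcpy((char*)pMappedData + writeOffset, srcData, writeSize);
vmaFlushAllocation(allocator, allocation, writeOffset, writeSize);
// Before reading data written by the device:
vmaInvalidateAllocation(allocator, allocation, readOffset, readSize);
*/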
13129 
13130 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13131 {
13132  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13133 
13134  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13135  {
13136  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13137  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13138  VMA_ASSERT(pDedicatedAllocations);
13139  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13140  VMA_ASSERT(success);
13141  }
13142 
13143  VkDeviceMemory hMemory = allocation->GetMemory();
13144 
13145  /*
13146  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
13147  before vkFreeMemory.
13148 
13149  if(allocation->GetMappedData() != VMA_NULL)
13150  {
13151  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13152  }
13153  */
13154 
13155  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13156 
13157  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13158 }
13159 
13160 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13161 {
13162  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13163  !hAllocation->CanBecomeLost() &&
13164  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13165  {
13166  void* pData = VMA_NULL;
13167  VkResult res = Map(hAllocation, &pData);
13168  if(res == VK_SUCCESS)
13169  {
13170  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13171  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13172  Unmap(hAllocation);
13173  }
13174  else
13175  {
13176  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13177  }
13178  }
13179 }
13180 
13181 #if VMA_STATS_STRING_ENABLED
13182 
13183 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
13184 {
13185  bool dedicatedAllocationsStarted = false;
13186  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13187  {
13188  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13189  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
13190  VMA_ASSERT(pDedicatedAllocVector);
13191  if(pDedicatedAllocVector->empty() == false)
13192  {
13193  if(dedicatedAllocationsStarted == false)
13194  {
13195  dedicatedAllocationsStarted = true;
13196  json.WriteString("DedicatedAllocations");
13197  json.BeginObject();
13198  }
13199 
13200  json.BeginString("Type ");
13201  json.ContinueString(memTypeIndex);
13202  json.EndString();
13203 
13204  json.BeginArray();
13205 
13206  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
13207  {
13208  json.BeginObject(true);
13209  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
13210  hAlloc->PrintParameters(json);
13211  json.EndObject();
13212  }
13213 
13214  json.EndArray();
13215  }
13216  }
13217  if(dedicatedAllocationsStarted)
13218  {
13219  json.EndObject();
13220  }
13221 
13222  {
13223  bool allocationsStarted = false;
13224  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13225  {
13226  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
13227  {
13228  if(allocationsStarted == false)
13229  {
13230  allocationsStarted = true;
13231  json.WriteString("DefaultPools");
13232  json.BeginObject();
13233  }
13234 
13235  json.BeginString("Type ");
13236  json.ContinueString(memTypeIndex);
13237  json.EndString();
13238 
13239  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
13240  }
13241  }
13242  if(allocationsStarted)
13243  {
13244  json.EndObject();
13245  }
13246  }
13247 
13248  // Custom pools
13249  {
13250  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13251  const size_t poolCount = m_Pools.size();
13252  if(poolCount > 0)
13253  {
13254  json.WriteString("Pools");
13255  json.BeginObject();
13256  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13257  {
13258  json.BeginString();
13259  json.ContinueString(m_Pools[poolIndex]->GetId());
13260  json.EndString();
13261 
13262  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
13263  }
13264  json.EndObject();
13265  }
13266  }
13267 }
13268 
13269 #endif // #if VMA_STATS_STRING_ENABLED
13270 
13271 ////////////////////////////////////////////////////////////////////////////////
13272 // Public interface
13273 
13274 VkResult vmaCreateAllocator(
13275  const VmaAllocatorCreateInfo* pCreateInfo,
13276  VmaAllocator* pAllocator)
13277 {
13278  VMA_ASSERT(pCreateInfo && pAllocator);
13279  VMA_DEBUG_LOG("vmaCreateAllocator");
13280  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13281  return (*pAllocator)->Init(pCreateInfo);
13282 }
13283 
13284 void vmaDestroyAllocator(
13285  VmaAllocator allocator)
13286 {
13287  if(allocator != VK_NULL_HANDLE)
13288  {
13289  VMA_DEBUG_LOG("vmaDestroyAllocator");
13290  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13291  vma_delete(&allocationCallbacks, allocator);
13292  }
13293 }
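// [Editorial example - minimal allocator lifetime, assuming `physicalDevice`
// and `device` were created by the application. A sketch, not library source.]
/*
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... create buffers, images, allocations ...
vmaDestroyAllocator(allocator);
*/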
13294 
13295 void vmaGetPhysicalDeviceProperties(
13296  VmaAllocator allocator,
13297  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13298 {
13299  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13300  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13301 }
13302 
13303 void vmaGetMemoryProperties(
13304  VmaAllocator allocator,
13305  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13306 {
13307  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13308  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13309 }
13310 
13311 void vmaGetMemoryTypeProperties(
13312  VmaAllocator allocator,
13313  uint32_t memoryTypeIndex,
13314  VkMemoryPropertyFlags* pFlags)
13315 {
13316  VMA_ASSERT(allocator && pFlags);
13317  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13318  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13319 }
13320 
13321 void vmaSetCurrentFrameIndex(
13322  VmaAllocator allocator,
13323  uint32_t frameIndex)
13324 {
13325  VMA_ASSERT(allocator);
13326  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13327 
13328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13329 
13330  allocator->SetCurrentFrameIndex(frameIndex);
13331 }
13332 
13333 void vmaCalculateStats(
13334  VmaAllocator allocator,
13335  VmaStats* pStats)
13336 {
13337  VMA_ASSERT(allocator && pStats);
13338  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13339  allocator->CalculateStats(pStats);
13340 }
13341 
13342 #if VMA_STATS_STRING_ENABLED
13343 
13344 void vmaBuildStatsString(
13345  VmaAllocator allocator,
13346  char** ppStatsString,
13347  VkBool32 detailedMap)
13348 {
13349  VMA_ASSERT(allocator && ppStatsString);
13350  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13351 
13352  VmaStringBuilder sb(allocator);
13353  {
13354  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13355  json.BeginObject();
13356 
13357  VmaStats stats;
13358  allocator->CalculateStats(&stats);
13359 
13360  json.WriteString("Total");
13361  VmaPrintStatInfo(json, stats.total);
13362 
13363  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13364  {
13365  json.BeginString("Heap ");
13366  json.ContinueString(heapIndex);
13367  json.EndString();
13368  json.BeginObject();
13369 
13370  json.WriteString("Size");
13371  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13372 
13373  json.WriteString("Flags");
13374  json.BeginArray(true);
13375  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13376  {
13377  json.WriteString("DEVICE_LOCAL");
13378  }
13379  json.EndArray();
13380 
13381  if(stats.memoryHeap[heapIndex].blockCount > 0)
13382  {
13383  json.WriteString("Stats");
13384  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13385  }
13386 
13387  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13388  {
13389  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13390  {
13391  json.BeginString("Type ");
13392  json.ContinueString(typeIndex);
13393  json.EndString();
13394 
13395  json.BeginObject();
13396 
13397  json.WriteString("Flags");
13398  json.BeginArray(true);
13399  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13400  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13401  {
13402  json.WriteString("DEVICE_LOCAL");
13403  }
13404  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13405  {
13406  json.WriteString("HOST_VISIBLE");
13407  }
13408  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13409  {
13410  json.WriteString("HOST_COHERENT");
13411  }
13412  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13413  {
13414  json.WriteString("HOST_CACHED");
13415  }
13416  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13417  {
13418  json.WriteString("LAZILY_ALLOCATED");
13419  }
13420  json.EndArray();
13421 
13422  if(stats.memoryType[typeIndex].blockCount > 0)
13423  {
13424  json.WriteString("Stats");
13425  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13426  }
13427 
13428  json.EndObject();
13429  }
13430  }
13431 
13432  json.EndObject();
13433  }
13434  if(detailedMap == VK_TRUE)
13435  {
13436  allocator->PrintDetailedMap(json);
13437  }
13438 
13439  json.EndObject();
13440  }
13441 
13442  const size_t len = sb.GetLength();
13443  char* const pChars = vma_new_array(allocator, char, len + 1);
13444  if(len > 0)
13445  {
13446  memcpy(pChars, sb.GetData(), len);
13447  }
13448  pChars[len] = '\0';
13449  *ppStatsString = pChars;
13450 }
13451 
13452 void vmaFreeStatsString(
13453  VmaAllocator allocator,
13454  char* pStatsString)
13455 {
13456  if(pStatsString != VMA_NULL)
13457  {
13458  VMA_ASSERT(allocator);
13459  size_t len = strlen(pStatsString);
13460  vma_delete_array(allocator, pStatsString, len + 1);
13461  }
13462 }
13463 
13464 #endif // #if VMA_STATS_STRING_ENABLED
13465 
13466 /*
13467 This function is not protected by any mutex because it just reads immutable data.
13468 */
13469 VkResult vmaFindMemoryTypeIndex(
13470  VmaAllocator allocator,
13471  uint32_t memoryTypeBits,
13472  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13473  uint32_t* pMemoryTypeIndex)
13474 {
13475  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13476  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13477  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13478 
13479  if(pAllocationCreateInfo->memoryTypeBits != 0)
13480  {
13481  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13482  }
13483 
13484  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13485  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13486 
13487  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13488  if(mapped)
13489  {
13490  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13491  }
13492 
13493  // Convert usage to requiredFlags and preferredFlags.
13494  switch(pAllocationCreateInfo->usage)
13495  {
13496  case VMA_MEMORY_USAGE_UNKNOWN:
13497  break;
13498  case VMA_MEMORY_USAGE_GPU_ONLY:
13499  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13500  {
13501  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13502  }
13503  break;
13504  case VMA_MEMORY_USAGE_CPU_ONLY:
13505  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13506  break;
13507  case VMA_MEMORY_USAGE_CPU_TO_GPU:
13508  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13509  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13510  {
13511  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13512  }
13513  break;
13514  case VMA_MEMORY_USAGE_GPU_TO_CPU:
13515  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13516  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13517  break;
13518  default:
13519  break;
13520  }
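// After the switch, 'usage' has been folded into the flag masks: GPU_ONLY and
// CPU_TO_GPU prefer DEVICE_LOCAL (unless an integrated GPU already prefers
// HOST_VISIBLE), CPU_ONLY requires HOST_VISIBLE | HOST_COHERENT, CPU_TO_GPU
// requires HOST_VISIBLE, and GPU_TO_CPU requires HOST_VISIBLE and prefers
// HOST_COHERENT | HOST_CACHED.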
13521 
13522  *pMemoryTypeIndex = UINT32_MAX;
13523  uint32_t minCost = UINT32_MAX;
13524  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13525  memTypeIndex < allocator->GetMemoryTypeCount();
13526  ++memTypeIndex, memTypeBit <<= 1)
13527  {
13528  // This memory type is acceptable according to memoryTypeBits bitmask.
13529  if((memTypeBit & memoryTypeBits) != 0)
13530  {
13531  const VkMemoryPropertyFlags currFlags =
13532  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13533  // This memory type contains requiredFlags.
13534  if((requiredFlags & ~currFlags) == 0)
13535  {
13536  // Calculate cost as number of bits from preferredFlags not present in this memory type.
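// For example, preferredFlags = HOST_COHERENT | HOST_CACHED checked against a type
// that only has HOST_COHERENT yields a cost of 1 (one preferred bit is missing).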
13537  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13538  // Remember memory type with lowest cost.
13539  if(currCost < minCost)
13540  {
13541  *pMemoryTypeIndex = memTypeIndex;
13542  if(currCost == 0)
13543  {
13544  return VK_SUCCESS;
13545  }
13546  minCost = currCost;
13547  }
13548  }
13549  }
13550  }
13551  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13552 }
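// A minimal usage sketch (illustrative only, not part of the library source; assumes
// 'allocator' was created earlier with vmaCreateAllocator): find a memory type suitable
// for a CPU-side staging buffer, accepting any memory type the device offers.
//
//     VmaAllocationCreateInfo stagingCreateInfo = {};
//     stagingCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//     uint32_t memTypeIndex = UINT32_MAX;
//     VkResult res = vmaFindMemoryTypeIndex(
//         allocator, UINT32_MAX, &stagingCreateInfo, &memTypeIndex);
//     // On VK_SUCCESS, memTypeIndex refers to a HOST_VISIBLE | HOST_COHERENT type.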
13553 
13554 VkResult vmaFindMemoryTypeIndexForBufferInfo(
13555  VmaAllocator allocator,
13556  const VkBufferCreateInfo* pBufferCreateInfo,
13557  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13558  uint32_t* pMemoryTypeIndex)
13559 {
13560  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13561  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13562  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13563  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13564 
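// A throwaway VkBuffer is created only to query its VkMemoryRequirements
// (memoryTypeBits in particular) and is destroyed again before returning.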
13565  const VkDevice hDev = allocator->m_hDevice;
13566  VkBuffer hBuffer = VK_NULL_HANDLE;
13567  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13568  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13569  if(res == VK_SUCCESS)
13570  {
13571  VkMemoryRequirements memReq = {};
13572  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13573  hDev, hBuffer, &memReq);
13574 
13575  res = vmaFindMemoryTypeIndex(
13576  allocator,
13577  memReq.memoryTypeBits,
13578  pAllocationCreateInfo,
13579  pMemoryTypeIndex);
13580 
13581  allocator->GetVulkanFunctions().vkDestroyBuffer(
13582  hDev, hBuffer, allocator->GetAllocationCallbacks());
13583  }
13584  return res;
13585 }
13586 
13587 VkResult vmaFindMemoryTypeIndexForImageInfo(
13588  VmaAllocator allocator,
13589  const VkImageCreateInfo* pImageCreateInfo,
13590  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13591  uint32_t* pMemoryTypeIndex)
13592 {
13593  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13594  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13595  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13596  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13597 
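// Likewise, a throwaway VkImage is created only to query its VkMemoryRequirements
// and is destroyed again before returning.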
13598  const VkDevice hDev = allocator->m_hDevice;
13599  VkImage hImage = VK_NULL_HANDLE;
13600  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13601  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13602  if(res == VK_SUCCESS)
13603  {
13604  VkMemoryRequirements memReq = {};
13605  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13606  hDev, hImage, &memReq);
13607 
13608  res = vmaFindMemoryTypeIndex(
13609  allocator,
13610  memReq.memoryTypeBits,
13611  pAllocationCreateInfo,
13612  pMemoryTypeIndex);
13613 
13614  allocator->GetVulkanFunctions().vkDestroyImage(
13615  hDev, hImage, allocator->GetAllocationCallbacks());
13616  }
13617  return res;
13618 }
13619 
13620 VkResult vmaCreatePool(
13621  VmaAllocator allocator,
13622  const VmaPoolCreateInfo* pCreateInfo,
13623  VmaPool* pPool)
13624 {
13625  VMA_ASSERT(allocator && pCreateInfo && pPool);
13626 
13627  VMA_DEBUG_LOG("vmaCreatePool");
13628 
13629  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13630 
13631  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13632 
13633 #if VMA_RECORDING_ENABLED
13634  if(allocator->GetRecorder() != VMA_NULL)
13635  {
13636  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13637  }
13638 #endif
13639 
13640  return res;
13641 }
13642 
13643 void vmaDestroyPool(
13644  VmaAllocator allocator,
13645  VmaPool pool)
13646 {
13647  VMA_ASSERT(allocator);
13648 
13649  if(pool == VK_NULL_HANDLE)
13650  {
13651  return;
13652  }
13653 
13654  VMA_DEBUG_LOG("vmaDestroyPool");
13655 
13656  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13657 
13658 #if VMA_RECORDING_ENABLED
13659  if(allocator->GetRecorder() != VMA_NULL)
13660  {
13661  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13662  }
13663 #endif
13664 
13665  allocator->DestroyPool(pool);
13666 }
13667 
13668 void vmaGetPoolStats(
13669  VmaAllocator allocator,
13670  VmaPool pool,
13671  VmaPoolStats* pPoolStats)
13672 {
13673  VMA_ASSERT(allocator && pool && pPoolStats);
13674 
13675  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13676 
13677  allocator->GetPoolStats(pool, pPoolStats);
13678 }
13679 
13680 void vmaMakePoolAllocationsLost(
13681  VmaAllocator allocator,
13682  VmaPool pool,
13683  size_t* pLostAllocationCount)
13684 {
13685  VMA_ASSERT(allocator && pool);
13686 
13687  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13688 
13689 #if VMA_RECORDING_ENABLED
13690  if(allocator->GetRecorder() != VMA_NULL)
13691  {
13692  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13693  }
13694 #endif
13695 
13696  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13697 }
13698 
13699 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13700 {
13701  VMA_ASSERT(allocator && pool);
13702 
13703  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13704 
13705  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13706 
13707  return allocator->CheckPoolCorruption(pool);
13708 }
13709 
13710 VkResult vmaAllocateMemory(
13711  VmaAllocator allocator,
13712  const VkMemoryRequirements* pVkMemoryRequirements,
13713  const VmaAllocationCreateInfo* pCreateInfo,
13714  VmaAllocation* pAllocation,
13715  VmaAllocationInfo* pAllocationInfo)
13716 {
13717  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13718 
13719  VMA_DEBUG_LOG("vmaAllocateMemory");
13720 
13721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13722 
13723  VkResult result = allocator->AllocateMemory(
13724  *pVkMemoryRequirements,
13725  false, // requiresDedicatedAllocation
13726  false, // prefersDedicatedAllocation
13727  VK_NULL_HANDLE, // dedicatedBuffer
13728  VK_NULL_HANDLE, // dedicatedImage
13729  *pCreateInfo,
13730  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13731  pAllocation);
13732 
13733 #if VMA_RECORDING_ENABLED
13734  if(allocator->GetRecorder() != VMA_NULL)
13735  {
13736  allocator->GetRecorder()->RecordAllocateMemory(
13737  allocator->GetCurrentFrameIndex(),
13738  *pVkMemoryRequirements,
13739  *pCreateInfo,
13740  *pAllocation);
13741  }
13742 #endif
13743 
13744  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13745  {
13746  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13747  }
13748 
13749  return result;
13750 }
13751 
13752 VkResult vmaAllocateMemoryForBuffer(
13753  VmaAllocator allocator,
13754  VkBuffer buffer,
13755  const VmaAllocationCreateInfo* pCreateInfo,
13756  VmaAllocation* pAllocation,
13757  VmaAllocationInfo* pAllocationInfo)
13758 {
13759  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13760 
13761  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
13762 
13763  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13764 
13765  VkMemoryRequirements vkMemReq = {};
13766  bool requiresDedicatedAllocation = false;
13767  bool prefersDedicatedAllocation = false;
13768  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
13769  requiresDedicatedAllocation,
13770  prefersDedicatedAllocation);
13771 
13772  VkResult result = allocator->AllocateMemory(
13773  vkMemReq,
13774  requiresDedicatedAllocation,
13775  prefersDedicatedAllocation,
13776  buffer, // dedicatedBuffer
13777  VK_NULL_HANDLE, // dedicatedImage
13778  *pCreateInfo,
13779  VMA_SUBALLOCATION_TYPE_BUFFER,
13780  pAllocation);
13781 
13782 #if VMA_RECORDING_ENABLED
13783  if(allocator->GetRecorder() != VMA_NULL)
13784  {
13785  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
13786  allocator->GetCurrentFrameIndex(),
13787  vkMemReq,
13788  requiresDedicatedAllocation,
13789  prefersDedicatedAllocation,
13790  *pCreateInfo,
13791  *pAllocation);
13792  }
13793 #endif
13794 
13795  if(pAllocationInfo && result == VK_SUCCESS)
13796  {
13797  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13798  }
13799 
13800  return result;
13801 }
13802 
13803 VkResult vmaAllocateMemoryForImage(
13804  VmaAllocator allocator,
13805  VkImage image,
13806  const VmaAllocationCreateInfo* pCreateInfo,
13807  VmaAllocation* pAllocation,
13808  VmaAllocationInfo* pAllocationInfo)
13809 {
13810  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13811 
13812  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
13813 
13814  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13815 
13816  VkMemoryRequirements vkMemReq = {};
13817  bool requiresDedicatedAllocation = false;
13818  bool prefersDedicatedAllocation = false;
13819  allocator->GetImageMemoryRequirements(image, vkMemReq,
13820  requiresDedicatedAllocation, prefersDedicatedAllocation);
13821 
13822  VkResult result = allocator->AllocateMemory(
13823  vkMemReq,
13824  requiresDedicatedAllocation,
13825  prefersDedicatedAllocation,
13826  VK_NULL_HANDLE, // dedicatedBuffer
13827  image, // dedicatedImage
13828  *pCreateInfo,
13829  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
13830  pAllocation);
13831 
13832 #if VMA_RECORDING_ENABLED
13833  if(allocator->GetRecorder() != VMA_NULL)
13834  {
13835  allocator->GetRecorder()->RecordAllocateMemoryForImage(
13836  allocator->GetCurrentFrameIndex(),
13837  vkMemReq,
13838  requiresDedicatedAllocation,
13839  prefersDedicatedAllocation,
13840  *pCreateInfo,
13841  *pAllocation);
13842  }
13843 #endif
13844 
13845  if(pAllocationInfo && result == VK_SUCCESS)
13846  {
13847  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13848  }
13849 
13850  return result;
13851 }
13852 
13853 void vmaFreeMemory(
13854  VmaAllocator allocator,
13855  VmaAllocation allocation)
13856 {
13857  VMA_ASSERT(allocator);
13858 
13859  if(allocation == VK_NULL_HANDLE)
13860  {
13861  return;
13862  }
13863 
13864  VMA_DEBUG_LOG("vmaFreeMemory");
13865 
13866  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13867 
13868 #if VMA_RECORDING_ENABLED
13869  if(allocator->GetRecorder() != VMA_NULL)
13870  {
13871  allocator->GetRecorder()->RecordFreeMemory(
13872  allocator->GetCurrentFrameIndex(),
13873  allocation);
13874  }
13875 #endif
13876 
13877  allocator->FreeMemory(allocation);
13878 }
13879 
13880 void vmaGetAllocationInfo(
13881  VmaAllocator allocator,
13882  VmaAllocation allocation,
13883  VmaAllocationInfo* pAllocationInfo)
13884 {
13885  VMA_ASSERT(allocator && allocation && pAllocationInfo);
13886 
13887  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13888 
13889 #if VMA_RECORDING_ENABLED
13890  if(allocator->GetRecorder() != VMA_NULL)
13891  {
13892  allocator->GetRecorder()->RecordGetAllocationInfo(
13893  allocator->GetCurrentFrameIndex(),
13894  allocation);
13895  }
13896 #endif
13897 
13898  allocator->GetAllocationInfo(allocation, pAllocationInfo);
13899 }
13900 
13901 VkBool32 vmaTouchAllocation(
13902  VmaAllocator allocator,
13903  VmaAllocation allocation)
13904 {
13905  VMA_ASSERT(allocator && allocation);
13906 
13907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13908 
13909 #if VMA_RECORDING_ENABLED
13910  if(allocator->GetRecorder() != VMA_NULL)
13911  {
13912  allocator->GetRecorder()->RecordTouchAllocation(
13913  allocator->GetCurrentFrameIndex(),
13914  allocation);
13915  }
13916 #endif
13917 
13918  return allocator->TouchAllocation(allocation);
13919 }
13920 
13921 void vmaSetAllocationUserData(
13922  VmaAllocator allocator,
13923  VmaAllocation allocation,
13924  void* pUserData)
13925 {
13926  VMA_ASSERT(allocator && allocation);
13927 
13928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13929 
13930  allocation->SetUserData(allocator, pUserData);
13931 
13932 #if VMA_RECORDING_ENABLED
13933  if(allocator->GetRecorder() != VMA_NULL)
13934  {
13935  allocator->GetRecorder()->RecordSetAllocationUserData(
13936  allocator->GetCurrentFrameIndex(),
13937  allocation,
13938  pUserData);
13939  }
13940 #endif
13941 }
13942 
13943 void vmaCreateLostAllocation(
13944  VmaAllocator allocator,
13945  VmaAllocation* pAllocation)
13946 {
13947  VMA_ASSERT(allocator && pAllocation);
13948 
13949  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13950 
13951  allocator->CreateLostAllocation(pAllocation);
13952 
13953 #if VMA_RECORDING_ENABLED
13954  if(allocator->GetRecorder() != VMA_NULL)
13955  {
13956  allocator->GetRecorder()->RecordCreateLostAllocation(
13957  allocator->GetCurrentFrameIndex(),
13958  *pAllocation);
13959  }
13960 #endif
13961 }
13962 
13963 VkResult vmaMapMemory(
13964  VmaAllocator allocator,
13965  VmaAllocation allocation,
13966  void** ppData)
13967 {
13968  VMA_ASSERT(allocator && allocation && ppData);
13969 
13970  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13971 
13972  VkResult res = allocator->Map(allocation, ppData);
13973 
13974 #if VMA_RECORDING_ENABLED
13975  if(allocator->GetRecorder() != VMA_NULL)
13976  {
13977  allocator->GetRecorder()->RecordMapMemory(
13978  allocator->GetCurrentFrameIndex(),
13979  allocation);
13980  }
13981 #endif
13982 
13983  return res;
13984 }
13985 
13986 void vmaUnmapMemory(
13987  VmaAllocator allocator,
13988  VmaAllocation allocation)
13989 {
13990  VMA_ASSERT(allocator && allocation);
13991 
13992  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13993 
13994 #if VMA_RECORDING_ENABLED
13995  if(allocator->GetRecorder() != VMA_NULL)
13996  {
13997  allocator->GetRecorder()->RecordUnmapMemory(
13998  allocator->GetCurrentFrameIndex(),
13999  allocation);
14000  }
14001 #endif
14002 
14003  allocator->Unmap(allocation);
14004 }
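// A minimal usage sketch (illustrative only; 'allocator', 'alloc', 'srcData', and
// 'srcSize' are assumed to exist in the caller): copy data into an allocation
// through a temporary mapping.
//
//     void* mapped = VMA_NULL;
//     if(vmaMapMemory(allocator, alloc, &mapped) == VK_SUCCESS)
//     {
//         memcpy(mapped, srcData, srcSize);
//         // Required only when the memory type is not HOST_COHERENT:
//         vmaFlushAllocation(allocator, alloc, 0, srcSize);
//         vmaUnmapMemory(allocator, alloc);
//     }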
14005 
14006 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14007 {
14008  VMA_ASSERT(allocator && allocation);
14009 
14010  VMA_DEBUG_LOG("vmaFlushAllocation");
14011 
14012  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14013 
14014  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
14015 
14016 #if VMA_RECORDING_ENABLED
14017  if(allocator->GetRecorder() != VMA_NULL)
14018  {
14019  allocator->GetRecorder()->RecordFlushAllocation(
14020  allocator->GetCurrentFrameIndex(),
14021  allocation, offset, size);
14022  }
14023 #endif
14024 }
14025 
14026 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14027 {
14028  VMA_ASSERT(allocator && allocation);
14029 
14030  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14031 
14032  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14033 
14034  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14035 
14036 #if VMA_RECORDING_ENABLED
14037  if(allocator->GetRecorder() != VMA_NULL)
14038  {
14039  allocator->GetRecorder()->RecordInvalidateAllocation(
14040  allocator->GetCurrentFrameIndex(),
14041  allocation, offset, size);
14042  }
14043 #endif
14044 }
14045 
14046 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14047 {
14048  VMA_ASSERT(allocator);
14049 
14050  VMA_DEBUG_LOG("vmaCheckCorruption");
14051 
14052  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14053 
14054  return allocator->CheckCorruption(memoryTypeBits);
14055 }
14056 
14057 VkResult vmaDefragment(
14058  VmaAllocator allocator,
14059  VmaAllocation* pAllocations,
14060  size_t allocationCount,
14061  VkBool32* pAllocationsChanged,
14062  const VmaDefragmentationInfo *pDefragmentationInfo,
14063  VmaDefragmentationStats* pDefragmentationStats)
14064 {
14065  VMA_ASSERT(allocator && pAllocations);
14066 
14067  VMA_DEBUG_LOG("vmaDefragment");
14068 
14069  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14070 
14071  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14072 }
14073 
14074 VkResult vmaBindBufferMemory(
14075  VmaAllocator allocator,
14076  VmaAllocation allocation,
14077  VkBuffer buffer)
14078 {
14079  VMA_ASSERT(allocator && allocation && buffer);
14080 
14081  VMA_DEBUG_LOG("vmaBindBufferMemory");
14082 
14083  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14084 
14085  return allocator->BindBufferMemory(allocation, buffer);
14086 }
14087 
14088 VkResult vmaBindImageMemory(
14089  VmaAllocator allocator,
14090  VmaAllocation allocation,
14091  VkImage image)
14092 {
14093  VMA_ASSERT(allocator && allocation && image);
14094 
14095  VMA_DEBUG_LOG("vmaBindImageMemory");
14096 
14097  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14098 
14099  return allocator->BindImageMemory(allocation, image);
14100 }
14101 
14102 VkResult vmaCreateBuffer(
14103  VmaAllocator allocator,
14104  const VkBufferCreateInfo* pBufferCreateInfo,
14105  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14106  VkBuffer* pBuffer,
14107  VmaAllocation* pAllocation,
14108  VmaAllocationInfo* pAllocationInfo)
14109 {
14110  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14111 
14112  if(pBufferCreateInfo->size == 0)
14113  {
14114  return VK_ERROR_VALIDATION_FAILED_EXT;
14115  }
14116 
14117  VMA_DEBUG_LOG("vmaCreateBuffer");
14118 
14119  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14120 
14121  *pBuffer = VK_NULL_HANDLE;
14122  *pAllocation = VK_NULL_HANDLE;
14123 
14124  // 1. Create VkBuffer.
14125  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14126  allocator->m_hDevice,
14127  pBufferCreateInfo,
14128  allocator->GetAllocationCallbacks(),
14129  pBuffer);
14130  if(res >= 0)
14131  {
14132  // 2. vkGetBufferMemoryRequirements.
14133  VkMemoryRequirements vkMemReq = {};
14134  bool requiresDedicatedAllocation = false;
14135  bool prefersDedicatedAllocation = false;
14136  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14137  requiresDedicatedAllocation, prefersDedicatedAllocation);
14138 
14139  // Make sure that the alignment required for specific buffer usages, as reported
14140  // in VkPhysicalDeviceLimits, is covered by the alignment from the memory requirements.
14141  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14142  {
14143  VMA_ASSERT(vkMemReq.alignment %
14144  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14145  }
14146  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14147  {
14148  VMA_ASSERT(vkMemReq.alignment %
14149  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14150  }
14151  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14152  {
14153  VMA_ASSERT(vkMemReq.alignment %
14154  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14155  }
14156 
14157  // 3. Allocate memory using allocator.
14158  res = allocator->AllocateMemory(
14159  vkMemReq,
14160  requiresDedicatedAllocation,
14161  prefersDedicatedAllocation,
14162  *pBuffer, // dedicatedBuffer
14163  VK_NULL_HANDLE, // dedicatedImage
14164  *pAllocationCreateInfo,
14165  VMA_SUBALLOCATION_TYPE_BUFFER,
14166  pAllocation);
14167 
14168 #if VMA_RECORDING_ENABLED
14169  if(allocator->GetRecorder() != VMA_NULL)
14170  {
14171  allocator->GetRecorder()->RecordCreateBuffer(
14172  allocator->GetCurrentFrameIndex(),
14173  *pBufferCreateInfo,
14174  *pAllocationCreateInfo,
14175  *pAllocation);
14176  }
14177 #endif
14178 
14179  if(res >= 0)
14180  {
14181  // 4. Bind buffer with memory.
14182  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14183  if(res >= 0)
14184  {
14185  // All steps succeeded.
14186  #if VMA_STATS_STRING_ENABLED
14187  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14188  #endif
14189  if(pAllocationInfo != VMA_NULL)
14190  {
14191  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14192  }
14193 
14194  return VK_SUCCESS;
14195  }
14196  allocator->FreeMemory(*pAllocation);
14197  *pAllocation = VK_NULL_HANDLE;
14198  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14199  *pBuffer = VK_NULL_HANDLE;
14200  return res;
14201  }
14202  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14203  *pBuffer = VK_NULL_HANDLE;
14204  return res;
14205  }
14206  return res;
14207 }
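// A minimal usage sketch (illustrative only; 'allocator' is assumed to exist): create
// a 64 KiB device-local vertex buffer together with its backing memory in one call.
//
//     VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufCreateInfo.size = 65536;
//     bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//     VkBuffer buf = VK_NULL_HANDLE;
//     VmaAllocation alloc = VK_NULL_HANDLE;
//     VkResult res = vmaCreateBuffer(
//         allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);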
14208 
14209 void vmaDestroyBuffer(
14210  VmaAllocator allocator,
14211  VkBuffer buffer,
14212  VmaAllocation allocation)
14213 {
14214  VMA_ASSERT(allocator);
14215 
14216  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14217  {
14218  return;
14219  }
14220 
14221  VMA_DEBUG_LOG("vmaDestroyBuffer");
14222 
14223  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14224 
14225 #if VMA_RECORDING_ENABLED
14226  if(allocator->GetRecorder() != VMA_NULL)
14227  {
14228  allocator->GetRecorder()->RecordDestroyBuffer(
14229  allocator->GetCurrentFrameIndex(),
14230  allocation);
14231  }
14232 #endif
14233 
14234  if(buffer != VK_NULL_HANDLE)
14235  {
14236  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14237  }
14238 
14239  if(allocation != VK_NULL_HANDLE)
14240  {
14241  allocator->FreeMemory(allocation);
14242  }
14243 }
14244 
14245 VkResult vmaCreateImage(
14246  VmaAllocator allocator,
14247  const VkImageCreateInfo* pImageCreateInfo,
14248  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14249  VkImage* pImage,
14250  VmaAllocation* pAllocation,
14251  VmaAllocationInfo* pAllocationInfo)
14252 {
14253  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14254 
14255  if(pImageCreateInfo->extent.width == 0 ||
14256  pImageCreateInfo->extent.height == 0 ||
14257  pImageCreateInfo->extent.depth == 0 ||
14258  pImageCreateInfo->mipLevels == 0 ||
14259  pImageCreateInfo->arrayLayers == 0)
14260  {
14261  return VK_ERROR_VALIDATION_FAILED_EXT;
14262  }
14263 
14264  VMA_DEBUG_LOG("vmaCreateImage");
14265 
14266  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14267 
14268  *pImage = VK_NULL_HANDLE;
14269  *pAllocation = VK_NULL_HANDLE;
14270 
14271  // 1. Create VkImage.
14272  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14273  allocator->m_hDevice,
14274  pImageCreateInfo,
14275  allocator->GetAllocationCallbacks(),
14276  pImage);
14277  if(res >= 0)
14278  {
14279  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14280  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14281  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14282 
14283  // 2. Allocate memory using allocator.
14284  VkMemoryRequirements vkMemReq = {};
14285  bool requiresDedicatedAllocation = false;
14286  bool prefersDedicatedAllocation = false;
14287  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14288  requiresDedicatedAllocation, prefersDedicatedAllocation);
14289 
14290  res = allocator->AllocateMemory(
14291  vkMemReq,
14292  requiresDedicatedAllocation,
14293  prefersDedicatedAllocation,
14294  VK_NULL_HANDLE, // dedicatedBuffer
14295  *pImage, // dedicatedImage
14296  *pAllocationCreateInfo,
14297  suballocType,
14298  pAllocation);
14299 
14300 #if VMA_RECORDING_ENABLED
14301  if(allocator->GetRecorder() != VMA_NULL)
14302  {
14303  allocator->GetRecorder()->RecordCreateImage(
14304  allocator->GetCurrentFrameIndex(),
14305  *pImageCreateInfo,
14306  *pAllocationCreateInfo,
14307  *pAllocation);
14308  }
14309 #endif
14310 
14311  if(res >= 0)
14312  {
14313  // 3. Bind image with memory.
14314  res = allocator->BindImageMemory(*pAllocation, *pImage);
14315  if(res >= 0)
14316  {
14317  // All steps succeeded.
14318  #if VMA_STATS_STRING_ENABLED
14319  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14320  #endif
14321  if(pAllocationInfo != VMA_NULL)
14322  {
14323  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14324  }
14325 
14326  return VK_SUCCESS;
14327  }
14328  allocator->FreeMemory(*pAllocation);
14329  *pAllocation = VK_NULL_HANDLE;
14330  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14331  *pImage = VK_NULL_HANDLE;
14332  return res;
14333  }
14334  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14335  *pImage = VK_NULL_HANDLE;
14336  return res;
14337  }
14338  return res;
14339 }
14340 
14341 void vmaDestroyImage(
14342  VmaAllocator allocator,
14343  VkImage image,
14344  VmaAllocation allocation)
14345 {
14346  VMA_ASSERT(allocator);
14347 
14348  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14349  {
14350  return;
14351  }
14352 
14353  VMA_DEBUG_LOG("vmaDestroyImage");
14354 
14355  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14356 
14357 #if VMA_RECORDING_ENABLED
14358  if(allocator->GetRecorder() != VMA_NULL)
14359  {
14360  allocator->GetRecorder()->RecordDestroyImage(
14361  allocator->GetCurrentFrameIndex(),
14362  allocation);
14363  }
14364 #endif
14365 
14366  if(image != VK_NULL_HANDLE)
14367  {
14368  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14369  }
14370  if(allocation != VK_NULL_HANDLE)
14371  {
14372  allocator->FreeMemory(allocation);
14373  }
14374 }
14375 
14376 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1584
+
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1885
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
-
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1632
+
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1641
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
-
Definition: vk_mem_alloc.h:1606
-
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2198
-
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1587
+
Definition: vk_mem_alloc.h:1615
+
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2207
+
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1596
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
-
Definition: vk_mem_alloc.h:1833
-
Definition: vk_mem_alloc.h:1936
-
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1579
-
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2298
-
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1629
-
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2543
-
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2087
-
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1476
+
Definition: vk_mem_alloc.h:1842
+
Definition: vk_mem_alloc.h:1945
+
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1588
+
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2307
+
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1638
+
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2552
+
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2096
+
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1485
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
-
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2179
-
Definition: vk_mem_alloc.h:1913
-
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1568
-
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1986
-
Definition: vk_mem_alloc.h:1860
-
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1641
-
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2115
+
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2188
+
Definition: vk_mem_alloc.h:1922
+
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1577
+
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1995
+
Definition: vk_mem_alloc.h:1869
+
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1650
+
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2124
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
-
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1694
-
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1626
+
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1703
+
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1635
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
-
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1864
+
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1873
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
-
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1766
-
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1584
-
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1765
-
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2547
+
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1775
+
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1593
+
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1774
+
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2556
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
-
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1658
-
VmaStatInfo total
Definition: vk_mem_alloc.h:1775
-
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2555
-
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1970
-
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2538
-
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1585
-
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1510
+
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1667
+
VmaStatInfo total
Definition: vk_mem_alloc.h:1784
+
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2564
+
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1979
+
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2547
+
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1594
+
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1519
Represents main object of this library initialized.
-
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1635
+
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1644
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
-
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2129
-
Definition: vk_mem_alloc.h:2123
-
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1701
-
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2308
+
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2138
+
Definition: vk_mem_alloc.h:2132
+
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1710
+
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2317
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
-
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1580
-
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1604
-
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2007
-
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2149
-
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2185
+
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1589
+
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1613
+
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2016
+
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2158
+
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2194
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
-
Definition: vk_mem_alloc.h:1566
-
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2132
+
Definition: vk_mem_alloc.h:1575
+
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2141
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
-
VmaMemoryUsage
Definition: vk_mem_alloc.h:1811
+
VmaMemoryUsage
Definition: vk_mem_alloc.h:1820
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
-
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2533
+
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2542
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
-
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2551
-
Definition: vk_mem_alloc.h:1850
-
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1994
-
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1583
+
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2560
+
Definition: vk_mem_alloc.h:1859
+
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2003
+
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1592
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
-
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1771
-
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1516
+
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1780
+
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1525
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
- +
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
-
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1537
+
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1546
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
-
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1608
-
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1542
-
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2553
+
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1617
+
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1551
+
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2562
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
-
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1981
-
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2195
+
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1990
+
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2204
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
-
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1576
-
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1754
-
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2144
-
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1529
-
Definition: vk_mem_alloc.h:2119
+
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1585
+
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1763
+
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2153
+
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1538
+
Definition: vk_mem_alloc.h:2128
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
-
Definition: vk_mem_alloc.h:1920
-
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1767
-
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1533
-
Definition: vk_mem_alloc.h:1944
-
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2135
-
Definition: vk_mem_alloc.h:1859
-
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1582
+
Definition: vk_mem_alloc.h:1929
+
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1776
+
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1542
+
Definition: vk_mem_alloc.h:1953
+
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2144
+
Definition: vk_mem_alloc.h:1868
+
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1591
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
-
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1976
-
Definition: vk_mem_alloc.h:1967
+
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1985
+
Definition: vk_mem_alloc.h:1976
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
-
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1757
-
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1578
-
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2157
-
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1644
-
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2188
-
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1965
-
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2000
+
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1766
+
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1587
+
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2166
+
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1653
+
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2197
+
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1974
+
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2009
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
-
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1682
-
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1773
-
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1900
-
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1766
+
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1691
+
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1782
+
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1909
+
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1775
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
-
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1589
-
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1614
-
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1531
-
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1588
+
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1598
+
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1623
+
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1540
+
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1597
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
-
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2171
-
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1581
-
Definition: vk_mem_alloc.h:1931
+
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2180
+
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1590
+
Definition: vk_mem_alloc.h:1940
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
-
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1622
-
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2322
-
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1638
-
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1766
-
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1763
+
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1631
+
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2331
+
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1647
+
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1775
+
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1772
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
-
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2176
+
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2185
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
-
Definition: vk_mem_alloc.h:1940
-
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2303
-
Definition: vk_mem_alloc.h:1951
-
Definition: vk_mem_alloc.h:1963
-
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2549
-
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1574
+
Definition: vk_mem_alloc.h:1949
+
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2312
+
Definition: vk_mem_alloc.h:1960
+
Definition: vk_mem_alloc.h:1972
+
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2558
+
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1583
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
-
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1761
-
Definition: vk_mem_alloc.h:1816
-
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2125
+
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1770
+
Definition: vk_mem_alloc.h:1825
+
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2134
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
-Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting... Definition: vk_mem_alloc.h:1611
-uint32_t allocationCount Number of VmaAllocation allocation objects allocated. Definition: vk_mem_alloc.h:1759
-PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements Definition: vk_mem_alloc.h:1586
-PFN_vkDestroyImage vkDestroyImage Definition: vk_mem_alloc.h:1590
-Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b... Definition: vk_mem_alloc.h:1887
-Definition: vk_mem_alloc.h:1958
-Definition: vk_mem_alloc.h:1843
-void * pMappedData Pointer to the beginning of this allocation as mapped data. Definition: vk_mem_alloc.h:2317
+Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting... Definition: vk_mem_alloc.h:1620
+uint32_t allocationCount Number of VmaAllocation allocation objects allocated. Definition: vk_mem_alloc.h:1768
+PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements Definition: vk_mem_alloc.h:1595
+PFN_vkDestroyImage vkDestroyImage Definition: vk_mem_alloc.h:1599
+Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b... Definition: vk_mem_alloc.h:1896
+Definition: vk_mem_alloc.h:1967
+Definition: vk_mem_alloc.h:1852
+void * pMappedData Pointer to the beginning of this allocation as mapped data. Definition: vk_mem_alloc.h:2326
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
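vmaDestroyImage pairs with vmaCreateImage, which creates the VkImage and allocates memory for it in one call; a short sketch of that lifetime, assuming imageInfo is a fully filled VkImageCreateInfo provided by the caller:

#include "vk_mem_alloc.h"

void CreateAndDestroyImage(VmaAllocator allocator, const VkImageCreateInfo& imageInfo)
{
    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // device-local memory

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imageInfo, &allocInfo,
                                  &image, &allocation, nullptr);
    if(res == VK_SUCCESS)
    {
        // ... use the image ...
        // One call destroys the VkImage and frees its memory.
        vmaDestroyImage(allocator, image, allocation);
    }
}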
-Enables usage of VK_KHR_dedicated_allocation extension. Definition: vk_mem_alloc.h:1564
+Enables usage of VK_KHR_dedicated_allocation extension. Definition: vk_mem_alloc.h:1573
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
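A sketch of how vmaDefragment fills VmaDefragmentationStats, including the bytesMoved field shown earlier; the allocations array is a hypothetical list of allocations known to be safe to move:

#include <cstdio>
#include "vk_mem_alloc.h"

void DefragmentAndReport(VmaAllocator allocator,
                         VmaAllocation* allocations, size_t count)
{
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(allocator, allocations, count,
                                 nullptr,  // pAllocationsChanged (optional)
                                 nullptr,  // default VmaDefragmentationInfo
                                 &stats);
    if(res == VK_SUCCESS)
    {
        printf("moved %llu bytes\n", (unsigned long long)stats.bytesMoved);
    }
}

Note that buffers and images bound to allocations that were moved must be recreated and rebound afterwards; defragmentation only relocates the memory.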
-PFN_vkAllocateMemory vkAllocateMemory Definition: vk_mem_alloc.h:1577
-Enables alternative, linear allocation algorithm in this pool. Definition: vk_mem_alloc.h:2104
-Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). Definition: vk_mem_alloc.h:2284
+PFN_vkAllocateMemory vkAllocateMemory Definition: vk_mem_alloc.h:1586
+Enables alternative, linear allocation algorithm in this pool. Definition: vk_mem_alloc.h:2113
+Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). Definition: vk_mem_alloc.h:2293
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
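vmaAllocateMemoryForBuffer allocates memory suitable for a buffer but does not bind it; a sketch of that lower-level path, assuming buffer was created earlier with vkCreateBuffer:

#include "vk_mem_alloc.h"

void AllocateForBuffer(VmaAllocator allocator, VkDevice device, VkBuffer buffer)
{
    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation = VK_NULL_HANDLE;
    VmaAllocationInfo allocationInfo = {};
    VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &createInfo,
                                              &allocation, &allocationInfo);
    if(res == VK_SUCCESS)
    {
        // Binding stays the caller's responsibility on this path.
        vkBindBufferMemory(device, buffer,
                           allocationInfo.deviceMemory, allocationInfo.offset);
    }
}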
-Definition: vk_mem_alloc.h:1948
-VmaPoolCreateFlagBits Flags to be passed as VmaPoolCreateInfo::flags. Definition: vk_mem_alloc.h:2069
-VkDeviceSize unusedRangeSizeAvg Definition: vk_mem_alloc.h:1767
+Definition: vk_mem_alloc.h:1957
+VmaPoolCreateFlagBits Flags to be passed as VmaPoolCreateInfo::flags. Definition: vk_mem_alloc.h:2078
+VkDeviceSize unusedRangeSizeAvg Definition: vk_mem_alloc.h:1776
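A sketch of creating a custom pool with the linear-algorithm flag from VmaPoolCreateFlagBits above; memTypeIndex is a placeholder that would normally come from vmaFindMemoryTypeIndex(), and the block size is illustrative:

#include "vk_mem_alloc.h"

VmaPool CreateLinearPool(VmaAllocator allocator, uint32_t memTypeIndex)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolInfo.blockSize = 16ull * 1024 * 1024; // one 16 MiB block
    poolInfo.maxBlockCount = 1; // a linear pool uses a single block

    VmaPool pool = VK_NULL_HANDLE;
    vmaCreatePool(allocator, &poolInfo, &pool);
    return pool;
}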
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
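vmaTouchAllocation is only meaningful for allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT; a per-frame sketch combining it with vmaSetCurrentFrameIndex (the helper name is hypothetical):

#include "vk_mem_alloc.h"

bool UseIfNotLost(VmaAllocator allocator, VmaAllocation allocation,
                  uint32_t frameIndex)
{
    // Tell the allocator which frame is current so in-use tracking works.
    vmaSetCurrentFrameIndex(allocator, frameIndex);

    // Returns VK_TRUE and marks the allocation as used in this frame,
    // or VK_FALSE if it has been lost and must be recreated.
    return vmaTouchAllocation(allocator, allocation) == VK_TRUE;
}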
-VmaRecordFlagBits Flags to be used in VmaRecordSettings::flags. Definition: vk_mem_alloc.h:1598
-VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS] Definition: vk_mem_alloc.h:1774
+VmaRecordFlagBits Flags to be used in VmaRecordSettings::flags. Definition: vk_mem_alloc.h:1607
+VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS] Definition: vk_mem_alloc.h:1783
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
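vmaDestroyBuffer pairs with vmaCreateBuffer, the highest-level path, which creates the buffer and allocates and binds memory in one call; a sketch with illustrative size and usage flags:

#include "vk_mem_alloc.h"

void CreateAndDestroyBuffer(VmaAllocator allocator)
{
    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
                       VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    if(vmaCreateBuffer(allocator, &bufferInfo, &allocInfo,
                       &buffer, &allocation, nullptr) == VK_SUCCESS)
    {
        // ... use the buffer ...
        vmaDestroyBuffer(allocator, buffer, allocation); // frees memory too
    }
}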
-VkDeviceSize unusedSize Total number of bytes in the pool not used by any VmaAllocation. Definition: vk_mem_alloc.h:2182
-VkDeviceSize unusedRangeSizeMax Definition: vk_mem_alloc.h:1767
+VkDeviceSize unusedSize Total number of bytes in the pool not used by any VmaAllocation. Definition: vk_mem_alloc.h:2191
+VkDeviceSize unusedRangeSizeMax Definition: vk_mem_alloc.h:1776
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
-uint32_t memoryType Memory type index that this allocation was allocated from. Definition: vk_mem_alloc.h:2289
+uint32_t memoryType Memory type index that this allocation was allocated from. Definition: vk_mem_alloc.h:2298
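Finally, a sketch that reads back the VmaAllocationInfo fields these tooltips describe (memoryType, offset, pMappedData) through vmaGetAllocationInfo():

#include <cstdio>
#include "vk_mem_alloc.h"

void PrintAllocationInfo(VmaAllocator allocator, VmaAllocation allocation)
{
    VmaAllocationInfo info = {};
    vmaGetAllocationInfo(allocator, allocation, &info);

    printf("memory type index: %u\n", (unsigned)info.memoryType);
    printf("offset in VkDeviceMemory: %llu\n", (unsigned long long)info.offset);
    // Non-null only while the allocation is mapped (for example,
    // persistently mapped with VMA_ALLOCATION_CREATE_MAPPED_BIT).
    printf("mapped pointer: %p\n", info.pMappedData);
}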