From 7f97202addbf9ce6d94a07b4338ca0ce11804a7e Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Fri, 16 Nov 2018 13:43:34 +0100 Subject: [PATCH] Updated documentation of memory mapping with known bug in MoltenVK, based on #47. Thanks @DiegoAce ! --- docs/html/memory_mapping.html | 1 + docs/html/vk__mem__alloc_8h_source.html | 260 ++++++++++++------------ src/vk_mem_alloc.h | 4 +- 3 files changed, 134 insertions(+), 131 deletions(-) diff --git a/docs/html/memory_mapping.html b/docs/html/memory_mapping.html index e464c14..c584e46 100644 --- a/docs/html/memory_mapping.html +++ b/docs/html/memory_mapping.html @@ -82,6 +82,7 @@ Persistently mapped memory
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = sizeof(ConstantBuffer);
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
// Buffer is already mapped. You can access its memory.
memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));

There are some exceptions though, when you should consider mapping memory only for a short period of time:
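In such cases you can map the memory just around the access, using vmaMapMemory() and vmaUnmapMemory(), declared later in this file. A minimal sketch, assuming alloc is the allocation created above:

void* mappedData;
vmaMapMemory(allocator, alloc, &mappedData);
memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
vmaUnmapMemory(allocator, alloc);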

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 3e42ce3..cc767f1 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,189 +65,189 @@ $(function() {
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1477 /*
1478 Define this macro to 0/1 to disable/enable support for recording functionality,
1479 available through VmaAllocatorCreateInfo::pRecordSettings.
1480 */
1481 #ifndef VMA_RECORDING_ENABLED
1482  #ifdef _WIN32
1483  #define VMA_RECORDING_ENABLED 1
1484  #else
1485  #define VMA_RECORDING_ENABLED 0
1486  #endif
1487 #endif
1488 
1489 #ifndef NOMINMAX
1490  #define NOMINMAX // For windows.h
1491 #endif
1492 
1493 #include <vulkan/vulkan.h>
1494 
1495 #if VMA_RECORDING_ENABLED
1496  #include <windows.h>
1497 #endif
1498 
1499 #if !defined(VMA_DEDICATED_ALLOCATION)
1500  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1501  #define VMA_DEDICATED_ALLOCATION 1
1502  #else
1503  #define VMA_DEDICATED_ALLOCATION 0
1504  #endif
1505 #endif
1506 
1516 VK_DEFINE_HANDLE(VmaAllocator)
1517 
1518 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1520  VmaAllocator allocator,
1521  uint32_t memoryType,
1522  VkDeviceMemory memory,
1523  VkDeviceSize size);
1525 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1526  VmaAllocator allocator,
1527  uint32_t memoryType,
1528  VkDeviceMemory memory,
1529  VkDeviceSize size);
1530 
1544 
1574 
1577 typedef VkFlags VmaAllocatorCreateFlags;
1578 
1583 typedef struct VmaVulkanFunctions {
1584  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1585  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1586  PFN_vkAllocateMemory vkAllocateMemory;
1587  PFN_vkFreeMemory vkFreeMemory;
1588  PFN_vkMapMemory vkMapMemory;
1589  PFN_vkUnmapMemory vkUnmapMemory;
1590  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1591  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1592  PFN_vkBindBufferMemory vkBindBufferMemory;
1593  PFN_vkBindImageMemory vkBindImageMemory;
1594  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1595  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1596  PFN_vkCreateBuffer vkCreateBuffer;
1597  PFN_vkDestroyBuffer vkDestroyBuffer;
1598  PFN_vkCreateImage vkCreateImage;
1599  PFN_vkDestroyImage vkDestroyImage;
1600 #if VMA_DEDICATED_ALLOCATION
1601  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1602  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1603 #endif
1604 } VmaVulkanFunctions;
1605 
1607 typedef enum VmaRecordFlagBits {
1613 } VmaRecordFlagBits;
1614 
1617 typedef VkFlags VmaRecordFlags;
1618 
1620 typedef struct VmaRecordSettings
1621 {
1631  const char* pFilePath;
1632 } VmaRecordSettings;
1633 
1635 typedef struct VmaAllocatorCreateInfo
1636 {
1640 
1641  VkPhysicalDevice physicalDevice;
1643 
1644  VkDevice device;
1646 
1649 
1650  const VkAllocationCallbacks* pAllocationCallbacks;
1652 
1691  const VkDeviceSize* pHeapSizeLimit;
1711 } VmaAllocatorCreateInfo;
1712 
1714 VkResult vmaCreateAllocator(
1715  const VmaAllocatorCreateInfo* pCreateInfo,
1716  VmaAllocator* pAllocator);
1717 
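A minimal usage sketch for the two functions above (not part of the header; physicalDevice and device are assumed to be a valid VkPhysicalDevice and VkDevice):

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
// ... create and destroy buffers, images, allocations ...
vmaDestroyAllocator(allocator);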
1719 void vmaDestroyAllocator(
1720  VmaAllocator allocator);
1721 
1726 void vmaGetPhysicalDeviceProperties(
1727  VmaAllocator allocator,
1728  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1729 
1734 void vmaGetMemoryProperties(
1735  VmaAllocator allocator,
1736  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1737 
1744 void vmaGetMemoryTypeProperties(
1745  VmaAllocator allocator,
1746  uint32_t memoryTypeIndex,
1747  VkMemoryPropertyFlags* pFlags);
1748 
1757 void vmaSetCurrentFrameIndex(
1758  VmaAllocator allocator,
1759  uint32_t frameIndex);
1760 
1763 typedef struct VmaStatInfo
1764 {
1766  uint32_t blockCount;
1772  VkDeviceSize usedBytes;
1774  VkDeviceSize unusedBytes;
1777 } VmaStatInfo;
1778 
1780 typedef struct VmaStats
1781 {
1782  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1783  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1785 } VmaStats;
1786 
1788 void vmaCalculateStats(
1789  VmaAllocator allocator,
1790  VmaStats* pStats);
1791 
1792 #define VMA_STATS_STRING_ENABLED 1
1793 
1794 #if VMA_STATS_STRING_ENABLED
1795 
1797 
1799 void vmaBuildStatsString(
1800  VmaAllocator allocator,
1801  char** ppStatsString,
1802  VkBool32 detailedMap);
1803 
1804 void vmaFreeStatsString(
1805  VmaAllocator allocator,
1806  char* pStatsString);
1807 
1808 #endif // #if VMA_STATS_STRING_ENABLED
1809 
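A usage sketch for the pair of functions above, assuming a valid allocator; the string is a JSON dump of the allocator's internal state:

char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
// ... log or save statsString ...
vmaFreeStatsString(allocator, statsString);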
1818 VK_DEFINE_HANDLE(VmaPool)
1819 
1820 typedef enum VmaMemoryUsage
1821 {
1870 } VmaMemoryUsage;
1871 
1886 
1941 
1954 
1964 
1971 
1975 
1976 typedef struct VmaAllocationCreateInfo
1977 {
1990  VkMemoryPropertyFlags requiredFlags;
1995  VkMemoryPropertyFlags preferredFlags;
2003  uint32_t memoryTypeBits;
2016  void* pUserData;
2017 } VmaAllocationCreateInfo;
2018 
2035 VkResult vmaFindMemoryTypeIndex(
2036  VmaAllocator allocator,
2037  uint32_t memoryTypeBits,
2038  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2039  uint32_t* pMemoryTypeIndex);
2040 
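A sketch of typical usage, assuming memReq was filled by vkGetBufferMemoryRequirements and using VMA_MEMORY_USAGE_GPU_ONLY from the VmaMemoryUsage enum (its values are elided in this listing):

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndex(
    allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);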
2053 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2054  VmaAllocator allocator,
2055  const VkBufferCreateInfo* pBufferCreateInfo,
2056  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2057  uint32_t* pMemoryTypeIndex);
2058 
2071 VkResult vmaFindMemoryTypeIndexForImageInfo(
2072  VmaAllocator allocator,
2073  const VkImageCreateInfo* pImageCreateInfo,
2074  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2075  uint32_t* pMemoryTypeIndex);
2076 
2097 
2114 
2125 
2131 
2134 typedef VkFlags VmaPoolCreateFlags;
2135 
2138 typedef struct VmaPoolCreateInfo {
2153  VkDeviceSize blockSize;
2181 } VmaPoolCreateInfo;
2182 
2185 typedef struct VmaPoolStats {
2188  VkDeviceSize size;
2191  VkDeviceSize unusedSize;
2204  VkDeviceSize unusedRangeSizeMax;
2207  size_t blockCount;
2208 } VmaPoolStats;
2209 
2216 VkResult vmaCreatePool(
2217  VmaAllocator allocator,
2218  const VmaPoolCreateInfo* pCreateInfo,
2219  VmaPool* pPool);
2220 
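A sketch of creating and destroying a custom pool; memTypeIndex could come from vmaFindMemoryTypeIndex above, and the memoryTypeIndex member (elided in this listing) is assumed from the library's documentation:

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
VmaPool pool;
vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate from the pool ...
vmaDestroyPool(allocator, pool);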
2223 void vmaDestroyPool(
2224  VmaAllocator allocator,
2225  VmaPool pool);
2226 
2233 void vmaGetPoolStats(
2234  VmaAllocator allocator,
2235  VmaPool pool,
2236  VmaPoolStats* pPoolStats);
2237 
2244 void vmaMakePoolAllocationsLost(
2245  VmaAllocator allocator,
2246  VmaPool pool,
2247  size_t* pLostAllocationCount);
2248 
2263 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2264 
2289 VK_DEFINE_HANDLE(VmaAllocation)
2290 
2291 
2293 typedef struct VmaAllocationInfo {
2298  uint32_t memoryType;
2307  VkDeviceMemory deviceMemory;
2312  VkDeviceSize offset;
2317  VkDeviceSize size;
2331  void* pUserData;
2332 } VmaAllocationInfo;
2333 
2344 VkResult vmaAllocateMemory(
2345  VmaAllocator allocator,
2346  const VkMemoryRequirements* pVkMemoryRequirements,
2347  const VmaAllocationCreateInfo* pCreateInfo,
2348  VmaAllocation* pAllocation,
2349  VmaAllocationInfo* pAllocationInfo);
2350 
2357 VkResult vmaAllocateMemoryForBuffer(
2358  VmaAllocator allocator,
2359  VkBuffer buffer,
2360  const VmaAllocationCreateInfo* pCreateInfo,
2361  VmaAllocation* pAllocation,
2362  VmaAllocationInfo* pAllocationInfo);
2363 
2365 VkResult vmaAllocateMemoryForImage(
2366  VmaAllocator allocator,
2367  VkImage image,
2368  const VmaAllocationCreateInfo* pCreateInfo,
2369  VmaAllocation* pAllocation,
2370  VmaAllocationInfo* pAllocationInfo);
2371 
2373 void vmaFreeMemory(
2374  VmaAllocator allocator,
2375  VmaAllocation allocation);
2376 
2397 VkResult vmaResizeAllocation(
2398  VmaAllocator allocator,
2399  VmaAllocation allocation,
2400  VkDeviceSize newSize);
2401 
2418 void vmaGetAllocationInfo(
2419  VmaAllocator allocator,
2420  VmaAllocation allocation,
2421  VmaAllocationInfo* pAllocationInfo);
2422 
2437 VkBool32 vmaTouchAllocation(
2438  VmaAllocator allocator,
2439  VmaAllocation allocation);
2440 
2454 void vmaSetAllocationUserData(
2455  VmaAllocator allocator,
2456  VmaAllocation allocation,
2457  void* pUserData);
2458 
2469 void vmaCreateLostAllocation(
2470  VmaAllocator allocator,
2471  VmaAllocation* pAllocation);
2472 
2507 VkResult vmaMapMemory(
2508  VmaAllocator allocator,
2509  VmaAllocation allocation,
2510  void** ppData);
2511 
2516 void vmaUnmapMemory(
2517  VmaAllocator allocator,
2518  VmaAllocation allocation);
2519 
2532 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2533 
2546 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2547 
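On memory types that lack VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, CPU writes through a mapped pointer must be flushed and CPU reads preceded by an invalidate. A sketch using the two functions above (alloc is an assumed VmaAllocation):

// After writing through the mapped pointer:
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
// Before reading data written by the GPU:
vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);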
2564 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2565 
2567 typedef struct VmaDefragmentationInfo {
2572  VkDeviceSize maxBytesToMove;
2578 } VmaDefragmentationInfo;
2579 
2581 typedef struct VmaDefragmentationStats {
2583  VkDeviceSize bytesMoved;
2585  VkDeviceSize bytesFreed;
2590 } VmaDefragmentationStats;
2591 
2630 VkResult vmaDefragment(
2631  VmaAllocator allocator,
2632  VmaAllocation* pAllocations,
2633  size_t allocationCount,
2634  VkBool32* pAllocationsChanged,
2635  const VmaDefragmentationInfo *pDefragmentationInfo,
2636  VmaDefragmentationStats* pDefragmentationStats);
2637 
2650 VkResult vmaBindBufferMemory(
2651  VmaAllocator allocator,
2652  VmaAllocation allocation,
2653  VkBuffer buffer);
2654 
2667 VkResult vmaBindImageMemory(
2668  VmaAllocator allocator,
2669  VmaAllocation allocation,
2670  VkImage image);
2671 
2698 VkResult vmaCreateBuffer(
2699  VmaAllocator allocator,
2700  const VkBufferCreateInfo* pBufferCreateInfo,
2701  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2702  VkBuffer* pBuffer,
2703  VmaAllocation* pAllocation,
2704  VmaAllocationInfo* pAllocationInfo);
2705 
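The typical pattern, as a sketch (a 64 KiB GPU-only vertex buffer; error checking omitted):

VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VkBuffer buffer;
VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
// ...
vmaDestroyBuffer(allocator, buffer, allocation);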
2717 void vmaDestroyBuffer(
2718  VmaAllocator allocator,
2719  VkBuffer buffer,
2720  VmaAllocation allocation);
2721 
2723 VkResult vmaCreateImage(
2724  VmaAllocator allocator,
2725  const VkImageCreateInfo* pImageCreateInfo,
2726  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2727  VkImage* pImage,
2728  VmaAllocation* pAllocation,
2729  VmaAllocationInfo* pAllocationInfo);
2730 
2742 void vmaDestroyImage(
2743  VmaAllocator allocator,
2744  VkImage image,
2745  VmaAllocation allocation);
2746 
2747 #ifdef __cplusplus
2748 }
2749 #endif
2750 
2751 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2752 
2753 // For Visual Studio IntelliSense.
2754 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2755 #define VMA_IMPLEMENTATION
2756 #endif
2757 
2758 #ifdef VMA_IMPLEMENTATION
2759 #undef VMA_IMPLEMENTATION
2760 
2761 #include <cstdint>
2762 #include <cstdlib>
2763 #include <cstring>
2764 
2765 /*******************************************************************************
2766 CONFIGURATION SECTION
2767 
2768 Define some of these macros before each #include of this header or change them
2769 here if you need behavior other than the default in your environment.
2770 */
2771 
2772 /*
2773 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2774 internally, like:
2775 
2776  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2777 
2778 Define to 0 if you are going to provide your own pointers to Vulkan functions via
2779 VmaAllocatorCreateInfo::pVulkanFunctions.
2780 */
2781 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2782 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2783 #endif
2784 
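If you define VMA_STATIC_VULKAN_FUNCTIONS to 0, a sketch of providing the pointers yourself through VmaAllocatorCreateInfo::pVulkanFunctions could look like this (only two members shown):

VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
vulkanFunctions.vkFreeMemory = &vkFreeMemory;
// ... fill the remaining members likewise ...
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.pVulkanFunctions = &vulkanFunctions;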
2785 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2786 //#define VMA_USE_STL_CONTAINERS 1
2787 
2788 /* Set this macro to 1 to make the library include and use STL containers:
2789 std::pair, std::vector, std::list, std::unordered_map.
2790 
2791 Set it to 0 or leave it undefined to make the library use its own implementation of
2792 the containers.
2793 */
2794 #if VMA_USE_STL_CONTAINERS
2795  #define VMA_USE_STL_VECTOR 1
2796  #define VMA_USE_STL_UNORDERED_MAP 1
2797  #define VMA_USE_STL_LIST 1
2798 #endif
2799 
2800 #if VMA_USE_STL_VECTOR
2801  #include <vector>
2802 #endif
2803 
2804 #if VMA_USE_STL_UNORDERED_MAP
2805  #include <unordered_map>
2806 #endif
2807 
2808 #if VMA_USE_STL_LIST
2809  #include <list>
2810 #endif
2811 
2812 /*
2813 Following headers are used in this CONFIGURATION section only, so feel free to
2814 remove them if not needed.
2815 */
2816 #include <cassert> // for assert
2817 #include <algorithm> // for min, max
2818 #include <mutex> // for std::mutex
2819 #include <atomic> // for std::atomic
2820 
2821 #ifndef VMA_NULL
2822  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2823  #define VMA_NULL nullptr
2824 #endif
2825 
2826 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
2827 #include <cstdlib>
2828 void *aligned_alloc(size_t alignment, size_t size)
2829 {
2830  // alignment must be >= sizeof(void*)
2831  if(alignment < sizeof(void*))
2832  {
2833  alignment = sizeof(void*);
2834  }
2835 
2836  return memalign(alignment, size);
2837 }
2838 #elif defined(__APPLE__) || defined(__ANDROID__)
2839 #include <cstdlib>
2840 void *aligned_alloc(size_t alignment, size_t size)
2841 {
2842  // alignment must be >= sizeof(void*)
2843  if(alignment < sizeof(void*))
2844  {
2845  alignment = sizeof(void*);
2846  }
2847 
2848  void *pointer;
2849  if(posix_memalign(&pointer, alignment, size) == 0)
2850  return pointer;
2851  return VMA_NULL;
2852 }
2853 #endif
2854 
2855 // If your compiler is not compatible with C++11 and the definition of the
2856 // aligned_alloc() function is missing, uncommenting the following line may help:
2857 
2858 //#include <malloc.h>
2859 
2860 // Normal assert to check for programmer's errors, especially in Debug configuration.
2861 #ifndef VMA_ASSERT
2862  #ifdef _DEBUG
2863  #define VMA_ASSERT(expr) assert(expr)
2864  #else
2865  #define VMA_ASSERT(expr)
2866  #endif
2867 #endif
2868 
2869 // Assert that will be called very often, e.g. inside data structures like operator[].
2870 // Making it non-empty can make the program slow.
2871 #ifndef VMA_HEAVY_ASSERT
2872  #ifdef _DEBUG
2873  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2874  #else
2875  #define VMA_HEAVY_ASSERT(expr)
2876  #endif
2877 #endif
2878 
2879 #ifndef VMA_ALIGN_OF
2880  #define VMA_ALIGN_OF(type) (__alignof(type))
2881 #endif
2882 
2883 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2884  #if defined(_WIN32)
2885  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2886  #else
2887  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2888  #endif
2889 #endif
2890 
2891 #ifndef VMA_SYSTEM_FREE
2892  #if defined(_WIN32)
2893  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2894  #else
2895  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2896  #endif
2897 #endif
2898 
2899 #ifndef VMA_MIN
2900  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2901 #endif
2902 
2903 #ifndef VMA_MAX
2904  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2905 #endif
2906 
2907 #ifndef VMA_SWAP
2908  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2909 #endif
2910 
2911 #ifndef VMA_SORT
2912  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2913 #endif
2914 
2915 #ifndef VMA_DEBUG_LOG
2916  #define VMA_DEBUG_LOG(format, ...)
2917  /*
2918  #define VMA_DEBUG_LOG(format, ...) do { \
2919  printf(format, __VA_ARGS__); \
2920  printf("\n"); \
2921  } while(false)
2922  */
2923 #endif
2924 
2925 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2926 #if VMA_STATS_STRING_ENABLED
2927  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2928  {
2929  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2930  }
2931  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2932  {
2933  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2934  }
2935  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2936  {
2937  snprintf(outStr, strLen, "%p", ptr);
2938  }
2939 #endif
2940 
2941 #ifndef VMA_MUTEX
2942  class VmaMutex
2943  {
2944  public:
2945  VmaMutex() { }
2946  ~VmaMutex() { }
2947  void Lock() { m_Mutex.lock(); }
2948  void Unlock() { m_Mutex.unlock(); }
2949  private:
2950  std::mutex m_Mutex;
2951  };
2952  #define VMA_MUTEX VmaMutex
2953 #endif
2954 
2955 /*
2956 If providing your own implementation, you need to implement a subset of std::atomic:
2957 
2958 - Constructor(uint32_t desired)
2959 - uint32_t load() const
2960 - void store(uint32_t desired)
2961 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2962 */
2963 #ifndef VMA_ATOMIC_UINT32
2964  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2965 #endif
2966 
2967 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2968 
2972  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2973 #endif
2974 
2975 #ifndef VMA_DEBUG_ALIGNMENT
2976 
2980  #define VMA_DEBUG_ALIGNMENT (1)
2981 #endif
2982 
2983 #ifndef VMA_DEBUG_MARGIN
2984 
2988  #define VMA_DEBUG_MARGIN (0)
2989 #endif
2990 
2991 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2992 
2996  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2997 #endif
2998 
2999 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3000 
3005  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3006 #endif
3007 
3008 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3009 
3013  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3014 #endif
3015 
3016 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3017 
3021  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3022 #endif
3023 
3024 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3025  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3027 #endif
3028 
3029 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3030  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3032 #endif
3033 
3034 #ifndef VMA_CLASS_NO_COPY
3035  #define VMA_CLASS_NO_COPY(className) \
3036  private: \
3037  className(const className&) = delete; \
3038  className& operator=(const className&) = delete;
3039 #endif
3040 
3041 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3042 
3043 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3044 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3045 
3046 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3047 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3048 
3049 /*******************************************************************************
3050 END OF CONFIGURATION
3051 */
3052 
3053 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3054  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3055 
3056 // Returns number of bits set to 1 in (v).
3057 static inline uint32_t VmaCountBitsSet(uint32_t v)
3058 {
3059  uint32_t c = v - ((v >> 1) & 0x55555555);
3060  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3061  c = ((c >> 4) + c) & 0x0F0F0F0F;
3062  c = ((c >> 8) + c) & 0x00FF00FF;
3063  c = ((c >> 16) + c) & 0x0000FFFF;
3064  return c;
3065 }
3066 
3067 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3068 // Use types like uint32_t, uint64_t as T.
3069 template <typename T>
3070 static inline T VmaAlignUp(T val, T align)
3071 {
3072  return (val + align - 1) / align * align;
3073 }
3074 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3075 // Use types like uint32_t, uint64_t as T.
3076 template <typename T>
3077 static inline T VmaAlignDown(T val, T align)
3078 {
3079  return val / align * align;
3080 }
3081 
3082 // Division with mathematical rounding to nearest number.
3083 template <typename T>
3084 static inline T VmaRoundDiv(T x, T y)
3085 {
3086  return (x + (y / (T)2)) / y;
3087 }
3088 
3089 /*
3090 Returns true if given number is a power of two.
3091 T must be an unsigned integer type, or a signed integer that is always nonnegative.
3092 For 0 returns true.
3093 */
3094 template <typename T>
3095 inline bool VmaIsPow2(T x)
3096 {
3097  return (x & (x-1)) == 0;
3098 }
3099 
3100 // Returns the smallest power of 2 greater than or equal to v.
3101 static inline uint32_t VmaNextPow2(uint32_t v)
3102 {
3103  v--;
3104  v |= v >> 1;
3105  v |= v >> 2;
3106  v |= v >> 4;
3107  v |= v >> 8;
3108  v |= v >> 16;
3109  v++;
3110  return v;
3111 }
3112 static inline uint64_t VmaNextPow2(uint64_t v)
3113 {
3114  v--;
3115  v |= v >> 1;
3116  v |= v >> 2;
3117  v |= v >> 4;
3118  v |= v >> 8;
3119  v |= v >> 16;
3120  v |= v >> 32;
3121  v++;
3122  return v;
3123 }
3124 
3125 // Returns the largest power of 2 less than or equal to v.
3126 static inline uint32_t VmaPrevPow2(uint32_t v)
3127 {
3128  v |= v >> 1;
3129  v |= v >> 2;
3130  v |= v >> 4;
3131  v |= v >> 8;
3132  v |= v >> 16;
3133  v = v ^ (v >> 1);
3134  return v;
3135 }
3136 static inline uint64_t VmaPrevPow2(uint64_t v)
3137 {
3138  v |= v >> 1;
3139  v |= v >> 2;
3140  v |= v >> 4;
3141  v |= v >> 8;
3142  v |= v >> 16;
3143  v |= v >> 32;
3144  v = v ^ (v >> 1);
3145  return v;
3146 }
3147 
3148 static inline bool VmaStrIsEmpty(const char* pStr)
3149 {
3150  return pStr == VMA_NULL || *pStr == '\0';
3151 }
3152 
3153 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3154 {
3155  switch(algorithm)
3156  {
3157  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3158  return "Linear";
3159  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3160  return "Buddy";
3161  case 0:
3162  return "Default";
3163  default:
3164  VMA_ASSERT(0);
3165  return "";
3166  }
3167 }
3168 
3169 #ifndef VMA_SORT
3170 
3171 template<typename Iterator, typename Compare>
3172 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3173 {
3174  Iterator centerValue = end; --centerValue;
3175  Iterator insertIndex = beg;
3176  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3177  {
3178  if(cmp(*memTypeIndex, *centerValue))
3179  {
3180  if(insertIndex != memTypeIndex)
3181  {
3182  VMA_SWAP(*memTypeIndex, *insertIndex);
3183  }
3184  ++insertIndex;
3185  }
3186  }
3187  if(insertIndex != centerValue)
3188  {
3189  VMA_SWAP(*insertIndex, *centerValue);
3190  }
3191  return insertIndex;
3192 }
3193 
3194 template<typename Iterator, typename Compare>
3195 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3196 {
3197  if(beg < end)
3198  {
3199  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3200  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3201  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3202  }
3203 }
3204 
3205 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3206 
3207 #endif // #ifndef VMA_SORT
3208 
3209 /*
3210 Returns true if two memory blocks occupy overlapping pages.
3211 ResourceA must be at a lower memory offset than ResourceB.
3212 
3213 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3214 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3215 */
3216 static inline bool VmaBlocksOnSamePage(
3217  VkDeviceSize resourceAOffset,
3218  VkDeviceSize resourceASize,
3219  VkDeviceSize resourceBOffset,
3220  VkDeviceSize pageSize)
3221 {
3222  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3223  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3224  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3225  VkDeviceSize resourceBStart = resourceBOffset;
3226  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3227  return resourceAEndPage == resourceBStartPage;
3228 }
3229 
3230 enum VmaSuballocationType
3231 {
3232  VMA_SUBALLOCATION_TYPE_FREE = 0,
3233  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3234  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3235  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3236  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3237  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3238  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3239 };
3240 
3241 /*
3242 Returns true if given suballocation types could conflict and must respect
3243 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3244 or a linear image and the other one is an optimal image. If the type is unknown,
3245 behave conservatively.
3246 */
3247 static inline bool VmaIsBufferImageGranularityConflict(
3248  VmaSuballocationType suballocType1,
3249  VmaSuballocationType suballocType2)
3250 {
3251  if(suballocType1 > suballocType2)
3252  {
3253  VMA_SWAP(suballocType1, suballocType2);
3254  }
3255 
3256  switch(suballocType1)
3257  {
3258  case VMA_SUBALLOCATION_TYPE_FREE:
3259  return false;
3260  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3261  return true;
3262  case VMA_SUBALLOCATION_TYPE_BUFFER:
3263  return
3264  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3265  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3266  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3267  return
3268  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3269  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3271  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3272  return
3273  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3274  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3275  return false;
3276  default:
3277  VMA_ASSERT(0);
3278  return true;
3279  }
3280 }
3281 
3282 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3283 {
3284  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3285  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3286  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3287  {
3288  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3289  }
3290 }
3291 
3292 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3293 {
3294  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3295  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3296  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3297  {
3298  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3299  {
3300  return false;
3301  }
3302  }
3303  return true;
3304 }
3305 
3306 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3307 struct VmaMutexLock
3308 {
3309  VMA_CLASS_NO_COPY(VmaMutexLock)
3310 public:
3311  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3312  m_pMutex(useMutex ? &mutex : VMA_NULL)
3313  {
3314  if(m_pMutex)
3315  {
3316  m_pMutex->Lock();
3317  }
3318  }
3319 
3320  ~VmaMutexLock()
3321  {
3322  if(m_pMutex)
3323  {
3324  m_pMutex->Unlock();
3325  }
3326  }
3327 
3328 private:
3329  VMA_MUTEX* m_pMutex;
3330 };
3331 
3332 #if VMA_DEBUG_GLOBAL_MUTEX
3333  static VMA_MUTEX gDebugGlobalMutex;
3334  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3335 #else
3336  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3337 #endif
3338 
3339 // Minimum size of a free suballocation to register it in the free suballocation collection.
3340 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3341 
3342 /*
3343 Performs binary search and returns an iterator to the first element that is
3344 greater than or equal to (key), according to comparison (cmp).
3345 
3346 Cmp should return true if the first argument is less than the second argument.
3347 
3348 The returned value is the found element, if present in the collection, or the
3349 place where a new element with value (key) should be inserted.
3350 */
3351 template <typename CmpLess, typename IterT, typename KeyT>
3352 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3353 {
3354  size_t down = 0, up = (end - beg);
3355  while(down < up)
3356  {
3357  const size_t mid = (down + up) / 2;
3358  if(cmp(*(beg+mid), key))
3359  {
3360  down = mid + 1;
3361  }
3362  else
3363  {
3364  up = mid;
3365  }
3366  }
3367  return beg + down;
3368 }
3369 
3371 // Memory allocation
3372 
3373 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3374 {
3375  if((pAllocationCallbacks != VMA_NULL) &&
3376  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3377  {
3378  return (*pAllocationCallbacks->pfnAllocation)(
3379  pAllocationCallbacks->pUserData,
3380  size,
3381  alignment,
3382  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3383  }
3384  else
3385  {
3386  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3387  }
3388 }
3389 
3390 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3391 {
3392  if((pAllocationCallbacks != VMA_NULL) &&
3393  (pAllocationCallbacks->pfnFree != VMA_NULL))
3394  {
3395  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3396  }
3397  else
3398  {
3399  VMA_SYSTEM_FREE(ptr);
3400  }
3401 }
3402 
3403 template<typename T>
3404 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3405 {
3406  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3407 }
3408 
3409 template<typename T>
3410 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3411 {
3412  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3413 }
3414 
3415 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3416 
3417 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3418 
3419 template<typename T>
3420 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3421 {
3422  ptr->~T();
3423  VmaFree(pAllocationCallbacks, ptr);
3424 }
3425 
3426 template<typename T>
3427 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3428 {
3429  if(ptr != VMA_NULL)
3430  {
3431  for(size_t i = count; i--; )
3432  {
3433  ptr[i].~T();
3434  }
3435  VmaFree(pAllocationCallbacks, ptr);
3436  }
3437 }
3438 
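A hypothetical usage sketch for the helpers above; MyObject and pCallbacks are assumed names, not part of the library:

MyObject* obj = vma_new(pCallbacks, MyObject)(/* constructor arguments */);
// ...
vma_delete(pCallbacks, obj);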
3439 // STL-compatible allocator.
3440 template<typename T>
3441 class VmaStlAllocator
3442 {
3443 public:
3444  const VkAllocationCallbacks* const m_pCallbacks;
3445  typedef T value_type;
3446 
3447  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3448  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3449 
3450  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3451  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3452 
3453  template<typename U>
3454  bool operator==(const VmaStlAllocator<U>& rhs) const
3455  {
3456  return m_pCallbacks == rhs.m_pCallbacks;
3457  }
3458  template<typename U>
3459  bool operator!=(const VmaStlAllocator<U>& rhs) const
3460  {
3461  return m_pCallbacks != rhs.m_pCallbacks;
3462  }
3463 
3464  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3465 };
3466 
3467 #if VMA_USE_STL_VECTOR
3468 
3469 #define VmaVector std::vector
3470 
3471 template<typename T, typename allocatorT>
3472 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3473 {
3474  vec.insert(vec.begin() + index, item);
3475 }
3476 
3477 template<typename T, typename allocatorT>
3478 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3479 {
3480  vec.erase(vec.begin() + index);
3481 }
3482 
3483 #else // #if VMA_USE_STL_VECTOR
3484 
3485 /* Class with an interface compatible with a subset of std::vector.
3486 T must be POD because constructors and destructors are not called and memcpy is
3487 used for these objects. */
3488 template<typename T, typename AllocatorT>
3489 class VmaVector
3490 {
3491 public:
3492  typedef T value_type;
3493 
3494  VmaVector(const AllocatorT& allocator) :
3495  m_Allocator(allocator),
3496  m_pArray(VMA_NULL),
3497  m_Count(0),
3498  m_Capacity(0)
3499  {
3500  }
3501 
3502  VmaVector(size_t count, const AllocatorT& allocator) :
3503  m_Allocator(allocator),
3504  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3505  m_Count(count),
3506  m_Capacity(count)
3507  {
3508  }
3509 
3510  VmaVector(const VmaVector<T, AllocatorT>& src) :
3511  m_Allocator(src.m_Allocator),
3512  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3513  m_Count(src.m_Count),
3514  m_Capacity(src.m_Count)
3515  {
3516  if(m_Count != 0)
3517  {
3518  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3519  }
3520  }
3521 
3522  ~VmaVector()
3523  {
3524  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3525  }
3526 
3527  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3528  {
3529  if(&rhs != this)
3530  {
3531  resize(rhs.m_Count);
3532  if(m_Count != 0)
3533  {
3534  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3535  }
3536  }
3537  return *this;
3538  }
3539 
3540  bool empty() const { return m_Count == 0; }
3541  size_t size() const { return m_Count; }
3542  T* data() { return m_pArray; }
3543  const T* data() const { return m_pArray; }
3544 
3545  T& operator[](size_t index)
3546  {
3547  VMA_HEAVY_ASSERT(index < m_Count);
3548  return m_pArray[index];
3549  }
3550  const T& operator[](size_t index) const
3551  {
3552  VMA_HEAVY_ASSERT(index < m_Count);
3553  return m_pArray[index];
3554  }
3555 
3556  T& front()
3557  {
3558  VMA_HEAVY_ASSERT(m_Count > 0);
3559  return m_pArray[0];
3560  }
3561  const T& front() const
3562  {
3563  VMA_HEAVY_ASSERT(m_Count > 0);
3564  return m_pArray[0];
3565  }
3566  T& back()
3567  {
3568  VMA_HEAVY_ASSERT(m_Count > 0);
3569  return m_pArray[m_Count - 1];
3570  }
3571  const T& back() const
3572  {
3573  VMA_HEAVY_ASSERT(m_Count > 0);
3574  return m_pArray[m_Count - 1];
3575  }
3576 
3577  void reserve(size_t newCapacity, bool freeMemory = false)
3578  {
3579  newCapacity = VMA_MAX(newCapacity, m_Count);
3580 
3581  if((newCapacity < m_Capacity) && !freeMemory)
3582  {
3583  newCapacity = m_Capacity;
3584  }
3585 
3586  if(newCapacity != m_Capacity)
3587  {
3588  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3589  if(m_Count != 0)
3590  {
3591  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3592  }
3593  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3594  m_Capacity = newCapacity;
3595  m_pArray = newArray;
3596  }
3597  }
3598 
3599  void resize(size_t newCount, bool freeMemory = false)
3600  {
3601  size_t newCapacity = m_Capacity;
3602  if(newCount > m_Capacity)
3603  {
3604  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3605  }
3606  else if(freeMemory)
3607  {
3608  newCapacity = newCount;
3609  }
3610 
3611  if(newCapacity != m_Capacity)
3612  {
3613  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3614  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3615  if(elementsToCopy != 0)
3616  {
3617  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3618  }
3619  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3620  m_Capacity = newCapacity;
3621  m_pArray = newArray;
3622  }
3623 
3624  m_Count = newCount;
3625  }
3626 
3627  void clear(bool freeMemory = false)
3628  {
3629  resize(0, freeMemory);
3630  }
3631 
3632  void insert(size_t index, const T& src)
3633  {
3634  VMA_HEAVY_ASSERT(index <= m_Count);
3635  const size_t oldCount = size();
3636  resize(oldCount + 1);
3637  if(index < oldCount)
3638  {
3639  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3640  }
3641  m_pArray[index] = src;
3642  }
3643 
3644  void remove(size_t index)
3645  {
3646  VMA_HEAVY_ASSERT(index < m_Count);
3647  const size_t oldCount = size();
3648  if(index < oldCount - 1)
3649  {
3650  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3651  }
3652  resize(oldCount - 1);
3653  }
3654 
3655  void push_back(const T& src)
3656  {
3657  const size_t newIndex = size();
3658  resize(newIndex + 1);
3659  m_pArray[newIndex] = src;
3660  }
3661 
3662  void pop_back()
3663  {
3664  VMA_HEAVY_ASSERT(m_Count > 0);
3665  resize(size() - 1);
3666  }
3667 
3668  void push_front(const T& src)
3669  {
3670  insert(0, src);
3671  }
3672 
3673  void pop_front()
3674  {
3675  VMA_HEAVY_ASSERT(m_Count > 0);
3676  remove(0);
3677  }
3678 
3679  typedef T* iterator;
3680 
3681  iterator begin() { return m_pArray; }
3682  iterator end() { return m_pArray + m_Count; }
3683 
3684 private:
3685  AllocatorT m_Allocator;
3686  T* m_pArray;
3687  size_t m_Count;
3688  size_t m_Capacity;
3689 };
3690 
3691 template<typename T, typename allocatorT>
3692 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
3693 {
3694  vec.insert(index, item);
3695 }
3696 
3697 template<typename T, typename allocatorT>
3698 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
3699 {
3700  vec.remove(index);
3701 }
3702 
3703 #endif // #if VMA_USE_STL_VECTOR
3704 
3705 template<typename CmpLess, typename VectorT>
3706 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
3707 {
3708  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3709  vector.data(),
3710  vector.data() + vector.size(),
3711  value,
3712  CmpLess()) - vector.data();
3713  VmaVectorInsert(vector, indexToInsert, value);
3714  return indexToInsert;
3715 }
3716 
3717 template<typename CmpLess, typename VectorT>
3718 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
3719 {
3720  CmpLess comparator;
3721  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
3722  vector.begin(),
3723  vector.end(),
3724  value,
3725  comparator);
3726  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
3727  {
3728  size_t indexToRemove = it - vector.begin();
3729  VmaVectorRemove(vector, indexToRemove);
3730  return true;
3731  }
3732  return false;
3733 }
3734 
3735 template<typename CmpLess, typename IterT, typename KeyT>
3736 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3737 {
3738  CmpLess comparator;
3739  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3740  beg, end, value, comparator);
3741  if(it == end ||
3742  (!comparator(*it, value) && !comparator(value, *it)))
3743  {
3744  return it;
3745  }
3746  return end;
3747 }
3748 
3750 // class VmaPoolAllocator
3751 
3752 /*
3753 Allocator for objects of type T using a list of arrays (pools) to speed up
3754 allocation. The number of elements that can be allocated is not bounded, because
3755 the allocator can create multiple blocks.
3756 */
3757 template<typename T>
3758 class VmaPoolAllocator
3759 {
3760  VMA_CLASS_NO_COPY(VmaPoolAllocator)
3761 public:
3762  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
3763  ~VmaPoolAllocator();
3764  void Clear();
3765  T* Alloc();
3766  void Free(T* ptr);
3767 
3768 private:
3769  union Item
3770  {
3771  uint32_t NextFreeIndex;
3772  T Value;
3773  };
3774 
3775  struct ItemBlock
3776  {
3777  Item* pItems;
3778  uint32_t FirstFreeIndex;
3779  };
3780 
3781  const VkAllocationCallbacks* m_pAllocationCallbacks;
3782  size_t m_ItemsPerBlock;
3783  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
3784 
3785  ItemBlock& CreateNewBlock();
3786 };
3787 
3788 template<typename T>
3789 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
3790  m_pAllocationCallbacks(pAllocationCallbacks),
3791  m_ItemsPerBlock(itemsPerBlock),
3792  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
3793 {
3794  VMA_ASSERT(itemsPerBlock > 0);
3795 }
3796 
3797 template<typename T>
3798 VmaPoolAllocator<T>::~VmaPoolAllocator()
3799 {
3800  Clear();
3801 }
3802 
3803 template<typename T>
3804 void VmaPoolAllocator<T>::Clear()
3805 {
3806  for(size_t i = m_ItemBlocks.size(); i--; )
3807  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3808  m_ItemBlocks.clear();
3809 }
3810 
3811 template<typename T>
3812 T* VmaPoolAllocator<T>::Alloc()
3813 {
3814  for(size_t i = m_ItemBlocks.size(); i--; )
3815  {
3816  ItemBlock& block = m_ItemBlocks[i];
3817  // This block has some free items: Use first one.
3818  if(block.FirstFreeIndex != UINT32_MAX)
3819  {
3820  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3821  block.FirstFreeIndex = pItem->NextFreeIndex;
3822  return &pItem->Value;
3823  }
3824  }
3825 
3826  // No block has free item: Create new one and use it.
3827  ItemBlock& newBlock = CreateNewBlock();
3828  Item* const pItem = &newBlock.pItems[0];
3829  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3830  return &pItem->Value;
3831 }
3832 
3833 template<typename T>
3834 void VmaPoolAllocator<T>::Free(T* ptr)
3835 {
3836  // Search all memory blocks to find ptr.
3837  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3838  {
3839  ItemBlock& block = m_ItemBlocks[i];
3840 
3841  // Casting to union.
3842  Item* pItemPtr;
3843  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3844 
3845  // Check if pItemPtr is in address range of this block.
3846  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3847  {
3848  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3849  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3850  block.FirstFreeIndex = index;
3851  return;
3852  }
3853  }
3854  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3855 }
3856 
3857 template<typename T>
3858 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3859 {
3860  ItemBlock newBlock = {
3861  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3862 
3863  m_ItemBlocks.push_back(newBlock);
3864 
3865  // Setup singly-linked list of all free items in this block.
3866  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3867  newBlock.pItems[i].NextFreeIndex = i + 1;
3868  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3869  return m_ItemBlocks.back();
3870 }
3871 
3873 // class VmaRawList, VmaList
3874 
3875 #if VMA_USE_STL_LIST
3876 
3877 #define VmaList std::list
3878 
3879 #else // #if VMA_USE_STL_LIST
3880 
3881 template<typename T>
3882 struct VmaListItem
3883 {
3884  VmaListItem* pPrev;
3885  VmaListItem* pNext;
3886  T Value;
3887 };
3888 
3889 // Doubly linked list.
3890 template<typename T>
3891 class VmaRawList
3892 {
3893  VMA_CLASS_NO_COPY(VmaRawList)
3894 public:
3895  typedef VmaListItem<T> ItemType;
3896 
3897  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
3898  ~VmaRawList();
3899  void Clear();
3900 
3901  size_t GetCount() const { return m_Count; }
3902  bool IsEmpty() const { return m_Count == 0; }
3903 
3904  ItemType* Front() { return m_pFront; }
3905  const ItemType* Front() const { return m_pFront; }
3906  ItemType* Back() { return m_pBack; }
3907  const ItemType* Back() const { return m_pBack; }
3908 
3909  ItemType* PushBack();
3910  ItemType* PushFront();
3911  ItemType* PushBack(const T& value);
3912  ItemType* PushFront(const T& value);
3913  void PopBack();
3914  void PopFront();
3915 
3916  // Item can be null - it means PushBack.
3917  ItemType* InsertBefore(ItemType* pItem);
3918  // Item can be null - it means PushFront.
3919  ItemType* InsertAfter(ItemType* pItem);
3920 
3921  ItemType* InsertBefore(ItemType* pItem, const T& value);
3922  ItemType* InsertAfter(ItemType* pItem, const T& value);
3923 
3924  void Remove(ItemType* pItem);
3925 
3926 private:
3927  const VkAllocationCallbacks* const m_pAllocationCallbacks;
3928  VmaPoolAllocator<ItemType> m_ItemAllocator;
3929  ItemType* m_pFront;
3930  ItemType* m_pBack;
3931  size_t m_Count;
3932 };
3933 
3934 template<typename T>
3935 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
3936  m_pAllocationCallbacks(pAllocationCallbacks),
3937  m_ItemAllocator(pAllocationCallbacks, 128),
3938  m_pFront(VMA_NULL),
3939  m_pBack(VMA_NULL),
3940  m_Count(0)
3941 {
3942 }
3943 
3944 template<typename T>
3945 VmaRawList<T>::~VmaRawList()
3946 {
3947  // Intentionally not calling Clear, because that would waste computation
3948  // returning all items to m_ItemAllocator as free.
3949 }
3950 
3951 template<typename T>
3952 void VmaRawList<T>::Clear()
3953 {
3954  if(IsEmpty() == false)
3955  {
3956  ItemType* pItem = m_pBack;
3957  while(pItem != VMA_NULL)
3958  {
3959  ItemType* const pPrevItem = pItem->pPrev;
3960  m_ItemAllocator.Free(pItem);
3961  pItem = pPrevItem;
3962  }
3963  m_pFront = VMA_NULL;
3964  m_pBack = VMA_NULL;
3965  m_Count = 0;
3966  }
3967 }
3968 
3969 template<typename T>
3970 VmaListItem<T>* VmaRawList<T>::PushBack()
3971 {
3972  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3973  pNewItem->pNext = VMA_NULL;
3974  if(IsEmpty())
3975  {
3976  pNewItem->pPrev = VMA_NULL;
3977  m_pFront = pNewItem;
3978  m_pBack = pNewItem;
3979  m_Count = 1;
3980  }
3981  else
3982  {
3983  pNewItem->pPrev = m_pBack;
3984  m_pBack->pNext = pNewItem;
3985  m_pBack = pNewItem;
3986  ++m_Count;
3987  }
3988  return pNewItem;
3989 }
3990 
3991 template<typename T>
3992 VmaListItem<T>* VmaRawList<T>::PushFront()
3993 {
3994  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3995  pNewItem->pPrev = VMA_NULL;
3996  if(IsEmpty())
3997  {
3998  pNewItem->pNext = VMA_NULL;
3999  m_pFront = pNewItem;
4000  m_pBack = pNewItem;
4001  m_Count = 1;
4002  }
4003  else
4004  {
4005  pNewItem->pNext = m_pFront;
4006  m_pFront->pPrev = pNewItem;
4007  m_pFront = pNewItem;
4008  ++m_Count;
4009  }
4010  return pNewItem;
4011 }
4012 
4013 template<typename T>
4014 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4015 {
4016  ItemType* const pNewItem = PushBack();
4017  pNewItem->Value = value;
4018  return pNewItem;
4019 }
4020 
4021 template<typename T>
4022 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4023 {
4024  ItemType* const pNewItem = PushFront();
4025  pNewItem->Value = value;
4026  return pNewItem;
4027 }
4028 
4029 template<typename T>
4030 void VmaRawList<T>::PopBack()
4031 {
4032  VMA_HEAVY_ASSERT(m_Count > 0);
4033  ItemType* const pBackItem = m_pBack;
4034  ItemType* const pPrevItem = pBackItem->pPrev;
4035  if(pPrevItem != VMA_NULL)
4036  {
4037  pPrevItem->pNext = VMA_NULL;
4038  }
4039  m_pBack = pPrevItem;
4040  m_ItemAllocator.Free(pBackItem);
4041  --m_Count;
4042 }
4043 
4044 template<typename T>
4045 void VmaRawList<T>::PopFront()
4046 {
4047  VMA_HEAVY_ASSERT(m_Count > 0);
4048  ItemType* const pFrontItem = m_pFront;
4049  ItemType* const pNextItem = pFrontItem->pNext;
4050  if(pNextItem != VMA_NULL)
4051  {
4052  pNextItem->pPrev = VMA_NULL;
4053  }
4054  m_pFront = pNextItem;
4055  m_ItemAllocator.Free(pFrontItem);
4056  --m_Count;
4057 }
4058 
4059 template<typename T>
4060 void VmaRawList<T>::Remove(ItemType* pItem)
4061 {
4062  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4063  VMA_HEAVY_ASSERT(m_Count > 0);
4064 
4065  if(pItem->pPrev != VMA_NULL)
4066  {
4067  pItem->pPrev->pNext = pItem->pNext;
4068  }
4069  else
4070  {
4071  VMA_HEAVY_ASSERT(m_pFront == pItem);
4072  m_pFront = pItem->pNext;
4073  }
4074 
4075  if(pItem->pNext != VMA_NULL)
4076  {
4077  pItem->pNext->pPrev = pItem->pPrev;
4078  }
4079  else
4080  {
4081  VMA_HEAVY_ASSERT(m_pBack == pItem);
4082  m_pBack = pItem->pPrev;
4083  }
4084 
4085  m_ItemAllocator.Free(pItem);
4086  --m_Count;
4087 }
4088 
4089 template<typename T>
4090 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4091 {
4092  if(pItem != VMA_NULL)
4093  {
4094  ItemType* const prevItem = pItem->pPrev;
4095  ItemType* const newItem = m_ItemAllocator.Alloc();
4096  newItem->pPrev = prevItem;
4097  newItem->pNext = pItem;
4098  pItem->pPrev = newItem;
4099  if(prevItem != VMA_NULL)
4100  {
4101  prevItem->pNext = newItem;
4102  }
4103  else
4104  {
4105  VMA_HEAVY_ASSERT(m_pFront == pItem);
4106  m_pFront = newItem;
4107  }
4108  ++m_Count;
4109  return newItem;
4110  }
4111  else
4112  return PushBack();
4113 }
4114 
4115 template<typename T>
4116 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4117 {
4118  if(pItem != VMA_NULL)
4119  {
4120  ItemType* const nextItem = pItem->pNext;
4121  ItemType* const newItem = m_ItemAllocator.Alloc();
4122  newItem->pNext = nextItem;
4123  newItem->pPrev = pItem;
4124  pItem->pNext = newItem;
4125  if(nextItem != VMA_NULL)
4126  {
4127  nextItem->pPrev = newItem;
4128  }
4129  else
4130  {
4131  VMA_HEAVY_ASSERT(m_pBack == pItem);
4132  m_pBack = newItem;
4133  }
4134  ++m_Count;
4135  return newItem;
4136  }
4137  else
4138  return PushFront();
4139 }
4140 
4141 template<typename T>
4142 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4143 {
4144  ItemType* const newItem = InsertBefore(pItem);
4145  newItem->Value = value;
4146  return newItem;
4147 }
4148 
4149 template<typename T>
4150 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4151 {
4152  ItemType* const newItem = InsertAfter(pItem);
4153  newItem->Value = value;
4154  return newItem;
4155 }
4156 
4157 template<typename T, typename AllocatorT>
4158 class VmaList
4159 {
4160  VMA_CLASS_NO_COPY(VmaList)
4161 public:
4162  class iterator
4163  {
4164  public:
4165  iterator() :
4166  m_pList(VMA_NULL),
4167  m_pItem(VMA_NULL)
4168  {
4169  }
4170 
4171  T& operator*() const
4172  {
4173  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4174  return m_pItem->Value;
4175  }
4176  T* operator->() const
4177  {
4178  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4179  return &m_pItem->Value;
4180  }
4181 
4182  iterator& operator++()
4183  {
4184  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4185  m_pItem = m_pItem->pNext;
4186  return *this;
4187  }
4188  iterator& operator--()
4189  {
4190  if(m_pItem != VMA_NULL)
4191  {
4192  m_pItem = m_pItem->pPrev;
4193  }
4194  else
4195  {
4196  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4197  m_pItem = m_pList->Back();
4198  }
4199  return *this;
4200  }
4201 
4202  iterator operator++(int)
4203  {
4204  iterator result = *this;
4205  ++*this;
4206  return result;
4207  }
4208  iterator operator--(int)
4209  {
4210  iterator result = *this;
4211  --*this;
4212  return result;
4213  }
4214 
4215  bool operator==(const iterator& rhs) const
4216  {
4217  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4218  return m_pItem == rhs.m_pItem;
4219  }
4220  bool operator!=(const iterator& rhs) const
4221  {
4222  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4223  return m_pItem != rhs.m_pItem;
4224  }
4225 
4226  private:
4227  VmaRawList<T>* m_pList;
4228  VmaListItem<T>* m_pItem;
4229 
4230  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4231  m_pList(pList),
4232  m_pItem(pItem)
4233  {
4234  }
4235 
4236  friend class VmaList<T, AllocatorT>;
4237  };
4238 
4239  class const_iterator
4240  {
4241  public:
4242  const_iterator() :
4243  m_pList(VMA_NULL),
4244  m_pItem(VMA_NULL)
4245  {
4246  }
4247 
4248  const_iterator(const iterator& src) :
4249  m_pList(src.m_pList),
4250  m_pItem(src.m_pItem)
4251  {
4252  }
4253 
4254  const T& operator*() const
4255  {
4256  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4257  return m_pItem->Value;
4258  }
4259  const T* operator->() const
4260  {
4261  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4262  return &m_pItem->Value;
4263  }
4264 
4265  const_iterator& operator++()
4266  {
4267  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4268  m_pItem = m_pItem->pNext;
4269  return *this;
4270  }
4271  const_iterator& operator--()
4272  {
4273  if(m_pItem != VMA_NULL)
4274  {
4275  m_pItem = m_pItem->pPrev;
4276  }
4277  else
4278  {
4279  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4280  m_pItem = m_pList->Back();
4281  }
4282  return *this;
4283  }
4284 
4285  const_iterator operator++(int)
4286  {
4287  const_iterator result = *this;
4288  ++*this;
4289  return result;
4290  }
4291  const_iterator operator--(int)
4292  {
4293  const_iterator result = *this;
4294  --*this;
4295  return result;
4296  }
4297 
4298  bool operator==(const const_iterator& rhs) const
4299  {
4300  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4301  return m_pItem == rhs.m_pItem;
4302  }
4303  bool operator!=(const const_iterator& rhs) const
4304  {
4305  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4306  return m_pItem != rhs.m_pItem;
4307  }
4308 
4309  private:
4310  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4311  m_pList(pList),
4312  m_pItem(pItem)
4313  {
4314  }
4315 
4316  const VmaRawList<T>* m_pList;
4317  const VmaListItem<T>* m_pItem;
4318 
4319  friend class VmaList<T, AllocatorT>;
4320  };
4321 
4322  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4323 
4324  bool empty() const { return m_RawList.IsEmpty(); }
4325  size_t size() const { return m_RawList.GetCount(); }
4326 
4327  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4328  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4329 
4330  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4331  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4332 
4333  void clear() { m_RawList.Clear(); }
4334  void push_back(const T& value) { m_RawList.PushBack(value); }
4335  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4336  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4337 
4338 private:
4339  VmaRawList<T> m_RawList;
4340 };
4341 
4342 #endif // #if VMA_USE_STL_LIST
4343 
4345 // class VmaMap
4346 
4347 // Unused in this version.
4348 #if 0
4349 
4350 #if VMA_USE_STL_UNORDERED_MAP
4351 
4352 #define VmaPair std::pair
4353 
4354 #define VMA_MAP_TYPE(KeyT, ValueT) \
4355  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4356 
4357 #else // #if VMA_USE_STL_UNORDERED_MAP
4358 
4359 template<typename T1, typename T2>
4360 struct VmaPair
4361 {
4362  T1 first;
4363  T2 second;
4364 
4365  VmaPair() : first(), second() { }
4366  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4367 };
4368 
4369 /* Class compatible with a subset of the interface of std::unordered_map.
4370 KeyT, ValueT must be POD because they will be stored in VmaVector.
4371 */
4372 template<typename KeyT, typename ValueT>
4373 class VmaMap
4374 {
4375 public:
4376  typedef VmaPair<KeyT, ValueT> PairType;
4377  typedef PairType* iterator;
4378 
4379  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4380 
4381  iterator begin() { return m_Vector.begin(); }
4382  iterator end() { return m_Vector.end(); }
4383 
4384  void insert(const PairType& pair);
4385  iterator find(const KeyT& key);
4386  void erase(iterator it);
4387 
4388 private:
4389  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4390 };
4391 
4392 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4393 
4394 template<typename FirstT, typename SecondT>
4395 struct VmaPairFirstLess
4396 {
4397  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4398  {
4399  return lhs.first < rhs.first;
4400  }
4401  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4402  {
4403  return lhs.first < rhsFirst;
4404  }
4405 };
4406 
4407 template<typename KeyT, typename ValueT>
4408 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4409 {
4410  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4411  m_Vector.data(),
4412  m_Vector.data() + m_Vector.size(),
4413  pair,
4414  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4415  VmaVectorInsert(m_Vector, indexToInsert, pair);
4416 }
4417 
4418 template<typename KeyT, typename ValueT>
4419 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4420 {
4421  PairType* it = VmaBinaryFindFirstNotLess(
4422  m_Vector.data(),
4423  m_Vector.data() + m_Vector.size(),
4424  key,
4425  VmaPairFirstLess<KeyT, ValueT>());
4426  if((it != m_Vector.end()) && (it->first == key))
4427  {
4428  return it;
4429  }
4430  else
4431  {
4432  return m_Vector.end();
4433  }
4434 }
4435 
4436 template<typename KeyT, typename ValueT>
4437 void VmaMap<KeyT, ValueT>::erase(iterator it)
4438 {
4439  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4440 }
4441 
4442 #endif // #if VMA_USE_STL_UNORDERED_MAP
4443 
4444 #endif // #if 0
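// A minimal standalone sketch of the flat-map technique VmaMap above relies on:
// keep a vector sorted by key and binary-search it, the role played by
// VmaBinaryFindFirstNotLess. This sketch uses only the standard library; all
// names here are illustrative, not part of the library.
#include <vector>
#include <algorithm>
#include <utility>

template<typename KeyT, typename ValueT>
class VmaFlatMapSketch
{
public:
    typedef std::pair<KeyT, ValueT> PairType;

    void insert(const PairType& pair)
    {
        // First element whose key is not less than pair.first - inserting
        // there keeps the vector sorted by key.
        typename std::vector<PairType>::iterator it = std::lower_bound(
            m_Vector.begin(), m_Vector.end(), pair,
            [](const PairType& lhs, const PairType& rhs) { return lhs.first < rhs.first; });
        m_Vector.insert(it, pair);
    }

    ValueT* find(const KeyT& key)
    {
        typename std::vector<PairType>::iterator it = std::lower_bound(
            m_Vector.begin(), m_Vector.end(), key,
            [](const PairType& lhs, const KeyT& rhsKey) { return lhs.first < rhsKey; });
        return (it != m_Vector.end() && it->first == key) ? &it->second : nullptr;
    }

private:
    std::vector<PairType> m_Vector; // Always sorted by .first, ascending.
};
// Lookup is O(log n) and insertion O(n) due to shifting - a good trade for
// small, rarely-mutated tables, which is why a flat map can beat a node-based map.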
4445 
4447 
4448 class VmaDeviceMemoryBlock;
4449 
4450 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4451 
4452 struct VmaAllocation_T
4453 {
4454  VMA_CLASS_NO_COPY(VmaAllocation_T)
4455 private:
4456  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4457 
4458  enum FLAGS
4459  {
4460  FLAG_USER_DATA_STRING = 0x01,
4461  };
4462 
4463 public:
4464  enum ALLOCATION_TYPE
4465  {
4466  ALLOCATION_TYPE_NONE,
4467  ALLOCATION_TYPE_BLOCK,
4468  ALLOCATION_TYPE_DEDICATED,
4469  };
4470 
4471  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4472  m_Alignment(1),
4473  m_Size(0),
4474  m_pUserData(VMA_NULL),
4475  m_LastUseFrameIndex(currentFrameIndex),
4476  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4477  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4478  m_MapCount(0),
4479  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4480  {
4481 #if VMA_STATS_STRING_ENABLED
4482  m_CreationFrameIndex = currentFrameIndex;
4483  m_BufferImageUsage = 0;
4484 #endif
4485  }
4486 
4487  ~VmaAllocation_T()
4488  {
4489  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4490 
4491  // Check if owned string was freed.
4492  VMA_ASSERT(m_pUserData == VMA_NULL);
4493  }
4494 
4495  void InitBlockAllocation(
4496  VmaPool hPool,
4497  VmaDeviceMemoryBlock* block,
4498  VkDeviceSize offset,
4499  VkDeviceSize alignment,
4500  VkDeviceSize size,
4501  VmaSuballocationType suballocationType,
4502  bool mapped,
4503  bool canBecomeLost)
4504  {
4505  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4506  VMA_ASSERT(block != VMA_NULL);
4507  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4508  m_Alignment = alignment;
4509  m_Size = size;
4510  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4511  m_SuballocationType = (uint8_t)suballocationType;
4512  m_BlockAllocation.m_hPool = hPool;
4513  m_BlockAllocation.m_Block = block;
4514  m_BlockAllocation.m_Offset = offset;
4515  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4516  }
4517 
4518  void InitLost()
4519  {
4520  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4521  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4522  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4523  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4524  m_BlockAllocation.m_Block = VMA_NULL;
4525  m_BlockAllocation.m_Offset = 0;
4526  m_BlockAllocation.m_CanBecomeLost = true;
4527  }
4528 
4529  void ChangeBlockAllocation(
4530  VmaAllocator hAllocator,
4531  VmaDeviceMemoryBlock* block,
4532  VkDeviceSize offset);
4533 
4534  void ChangeSize(VkDeviceSize newSize);
4535 
4536  // A non-null pMappedData means the allocation was created with the MAPPED flag.
4537  void InitDedicatedAllocation(
4538  uint32_t memoryTypeIndex,
4539  VkDeviceMemory hMemory,
4540  VmaSuballocationType suballocationType,
4541  void* pMappedData,
4542  VkDeviceSize size)
4543  {
4544  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4545  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4546  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4547  m_Alignment = 0;
4548  m_Size = size;
4549  m_SuballocationType = (uint8_t)suballocationType;
4550  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4551  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4552  m_DedicatedAllocation.m_hMemory = hMemory;
4553  m_DedicatedAllocation.m_pMappedData = pMappedData;
4554  }
4555 
4556  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4557  VkDeviceSize GetAlignment() const { return m_Alignment; }
4558  VkDeviceSize GetSize() const { return m_Size; }
4559  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4560  void* GetUserData() const { return m_pUserData; }
4561  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4562  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4563 
4564  VmaDeviceMemoryBlock* GetBlock() const
4565  {
4566  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4567  return m_BlockAllocation.m_Block;
4568  }
4569  VkDeviceSize GetOffset() const;
4570  VkDeviceMemory GetMemory() const;
4571  uint32_t GetMemoryTypeIndex() const;
4572  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4573  void* GetMappedData() const;
4574  bool CanBecomeLost() const;
4575  VmaPool GetPool() const;
4576 
4577  uint32_t GetLastUseFrameIndex() const
4578  {
4579  return m_LastUseFrameIndex.load();
4580  }
4581  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4582  {
4583  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4584  }
4585  /*
4586  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4587  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4588  - Else, returns false.
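 (For example, with frameInUseCount = 2 an allocation last used in frame 10
 can first be made lost in frame 13, since 10 + 2 < 13.)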
4589 
4590  If hAllocation is already lost, assert - you should not call it then.
4591  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
4592  */
4593  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4594 
4595  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4596  {
4597  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4598  outInfo.blockCount = 1;
4599  outInfo.allocationCount = 1;
4600  outInfo.unusedRangeCount = 0;
4601  outInfo.usedBytes = m_Size;
4602  outInfo.unusedBytes = 0;
4603  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4604  outInfo.unusedRangeSizeMin = UINT64_MAX;
4605  outInfo.unusedRangeSizeMax = 0;
4606  }
4607 
4608  void BlockAllocMap();
4609  void BlockAllocUnmap();
4610  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4611  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4612 
4613 #if VMA_STATS_STRING_ENABLED
4614  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4615  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4616 
4617  void InitBufferImageUsage(uint32_t bufferImageUsage)
4618  {
4619  VMA_ASSERT(m_BufferImageUsage == 0);
4620  m_BufferImageUsage = bufferImageUsage;
4621  }
4622 
4623  void PrintParameters(class VmaJsonWriter& json) const;
4624 #endif
4625 
4626 private:
4627  VkDeviceSize m_Alignment;
4628  VkDeviceSize m_Size;
4629  void* m_pUserData;
4630  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4631  uint8_t m_Type; // ALLOCATION_TYPE
4632  uint8_t m_SuballocationType; // VmaSuballocationType
4633  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4634  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
4635  uint8_t m_MapCount;
4636  uint8_t m_Flags; // enum FLAGS
4637 
4638  // Allocation out of VmaDeviceMemoryBlock.
4639  struct BlockAllocation
4640  {
4641  VmaPool m_hPool; // Null if belongs to general memory.
4642  VmaDeviceMemoryBlock* m_Block;
4643  VkDeviceSize m_Offset;
4644  bool m_CanBecomeLost;
4645  };
4646 
4647  // Allocation for an object that has its own private VkDeviceMemory.
4648  struct DedicatedAllocation
4649  {
4650  uint32_t m_MemoryTypeIndex;
4651  VkDeviceMemory m_hMemory;
4652  void* m_pMappedData; // Not null means memory is mapped.
4653  };
4654 
4655  union
4656  {
4657  // Allocation out of VmaDeviceMemoryBlock.
4658  BlockAllocation m_BlockAllocation;
4659  // Allocation for an object that has its own private VkDeviceMemory.
4660  DedicatedAllocation m_DedicatedAllocation;
4661  };
4662 
4663 #if VMA_STATS_STRING_ENABLED
4664  uint32_t m_CreationFrameIndex;
4665  uint32_t m_BufferImageUsage; // 0 if unknown.
4666 #endif
4667 
4668  void FreeUserDataString(VmaAllocator hAllocator);
4669 };
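// A minimal standalone sketch of how the single m_MapCount byte above packs
// both the persistent-map flag (bit 0x80) and the vmaMapMemory()/vmaUnmapMemory()
// reference counter (bits 0x7F). Names and asserts here are illustrative only.
#include <cstdint>
#include <cassert>

struct MapCountSketch
{
    static const uint8_t PERSISTENT_FLAG = 0x80;
    uint8_t m_MapCount = 0;

    bool IsPersistentlyMapped() const { return (m_MapCount & PERSISTENT_FLAG) != 0; }
    uint8_t UserMapCount() const { return (uint8_t)(m_MapCount & ~PERSISTENT_FLAG); }

    void Map()
    {
        assert(UserMapCount() < 0x7F && "Too many simultaneous mappings.");
        ++m_MapCount; // Touches only the low 7 bits; the flag bit is preserved.
    }
    void Unmap()
    {
        assert(UserMapCount() > 0 && "Unmapping an allocation that is not mapped.");
        --m_MapCount;
    }
};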
4670 
4671 /*
4672 Represents a region of a VmaDeviceMemoryBlock that is either assigned (returned
4673 to the user as an allocated memory block) or free.
4674 */
4675 struct VmaSuballocation
4676 {
4677  VkDeviceSize offset;
4678  VkDeviceSize size;
4679  VmaAllocation hAllocation;
4680  VmaSuballocationType type;
4681 };
4682 
4683 // Comparator for offsets.
4684 struct VmaSuballocationOffsetLess
4685 {
4686  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4687  {
4688  return lhs.offset < rhs.offset;
4689  }
4690 };
4691 struct VmaSuballocationOffsetGreater
4692 {
4693  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4694  {
4695  return lhs.offset > rhs.offset;
4696  }
4697 };
4698 
4699 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4700 
4701 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
4702 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4703 
4704 /*
4705 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4706 
4707 If canMakeOtherLost was false:
4708 - item points to a FREE suballocation.
4709 - itemsToMakeLostCount is 0.
4710 
4711 If canMakeOtherLost was true:
4712 - item points to the first of a sequence of suballocations, which are either FREE,
4713  or point to VmaAllocations that can become lost.
4714 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4715  the requested allocation to succeed.
4716 */
4717 struct VmaAllocationRequest
4718 {
4719  VkDeviceSize offset;
4720  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4721  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4722  VmaSuballocationList::iterator item;
4723  size_t itemsToMakeLostCount;
4724  void* customData;
4725 
4726  VkDeviceSize CalcCost() const
4727  {
4728  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4729  }
4730 };
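// Worked example for CalcCost() above: with sumItemSize = 300000 bytes and
// itemsToMakeLostCount = 2, the cost is 300000 + 2 * 1048576 = 2397152 bytes.
// Each allocation that would be made lost is charged a flat 1 MiB equivalent,
// so, all else equal, a request that sacrifices fewer allocations scores a
// lower cost.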
4731 
4732 /*
4733 Data structure used for bookkeeping of allocations and unused ranges of memory
4734 in a single VkDeviceMemory block.
4735 */
4736 class VmaBlockMetadata
4737 {
4738 public:
4739  VmaBlockMetadata(VmaAllocator hAllocator);
4740  virtual ~VmaBlockMetadata() { }
4741  virtual void Init(VkDeviceSize size) { m_Size = size; }
4742 
4743  // Validates all data structures inside this object. If not valid, returns false.
4744  virtual bool Validate() const = 0;
4745  VkDeviceSize GetSize() const { return m_Size; }
4746  virtual size_t GetAllocationCount() const = 0;
4747  virtual VkDeviceSize GetSumFreeSize() const = 0;
4748  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
4749  // Returns true if this block is empty - contains only a single free suballocation.
4750  virtual bool IsEmpty() const = 0;
4751 
4752  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
4753  // Shouldn't modify blockCount.
4754  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
4755 
4756 #if VMA_STATS_STRING_ENABLED
4757  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
4758 #endif
4759 
4760  // Tries to find a place for a suballocation with the given parameters inside this block.
4761  // On success, fills pAllocationRequest and returns true.
4762  // On failure, returns false.
4763  virtual bool CreateAllocationRequest(
4764  uint32_t currentFrameIndex,
4765  uint32_t frameInUseCount,
4766  VkDeviceSize bufferImageGranularity,
4767  VkDeviceSize allocSize,
4768  VkDeviceSize allocAlignment,
4769  bool upperAddress,
4770  VmaSuballocationType allocType,
4771  bool canMakeOtherLost,
4772  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
4773  VmaAllocationRequest* pAllocationRequest) = 0;
4774 
4775  virtual bool MakeRequestedAllocationsLost(
4776  uint32_t currentFrameIndex,
4777  uint32_t frameInUseCount,
4778  VmaAllocationRequest* pAllocationRequest) = 0;
4779 
4780  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
4781 
4782  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
4783 
4784  // Makes actual allocation based on request. Request must already be checked and valid.
4785  virtual void Alloc(
4786  const VmaAllocationRequest& request,
4787  VmaSuballocationType type,
4788  VkDeviceSize allocSize,
4789  bool upperAddress,
4790  VmaAllocation hAllocation) = 0;
4791 
4792  // Frees suballocation assigned to given memory region.
4793  virtual void Free(const VmaAllocation allocation) = 0;
4794  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
4795 
4796  // Tries to resize (grow or shrink) space for given allocation, in place.
4797  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
4798 
4799 protected:
4800  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
4801 
4802 #if VMA_STATS_STRING_ENABLED
4803  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
4804  VkDeviceSize unusedBytes,
4805  size_t allocationCount,
4806  size_t unusedRangeCount) const;
4807  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
4808  VkDeviceSize offset,
4809  VmaAllocation hAllocation) const;
4810  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
4811  VkDeviceSize offset,
4812  VkDeviceSize size) const;
4813  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
4814 #endif
4815 
4816 private:
4817  VkDeviceSize m_Size;
4818  const VkAllocationCallbacks* m_pAllocationCallbacks;
4819 };
4820 
4821 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
4822  VMA_ASSERT(0 && "Validation failed: " #cond); \
4823  return false; \
4824  } } while(false)
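// The do { ... } while(false) wrapper makes the macro expand to exactly one
// statement, so VMA_VALIDATE(cond); composes safely with if/else even without braces.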
4825 
4826 class VmaBlockMetadata_Generic : public VmaBlockMetadata
4827 {
4828  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
4829 public:
4830  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
4831  virtual ~VmaBlockMetadata_Generic();
4832  virtual void Init(VkDeviceSize size);
4833 
4834  virtual bool Validate() const;
4835  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4836  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4837  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4838  virtual bool IsEmpty() const;
4839 
4840  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4841  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4842 
4843 #if VMA_STATS_STRING_ENABLED
4844  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4845 #endif
4846 
4847  virtual bool CreateAllocationRequest(
4848  uint32_t currentFrameIndex,
4849  uint32_t frameInUseCount,
4850  VkDeviceSize bufferImageGranularity,
4851  VkDeviceSize allocSize,
4852  VkDeviceSize allocAlignment,
4853  bool upperAddress,
4854  VmaSuballocationType allocType,
4855  bool canMakeOtherLost,
4856  uint32_t strategy,
4857  VmaAllocationRequest* pAllocationRequest);
4858 
4859  virtual bool MakeRequestedAllocationsLost(
4860  uint32_t currentFrameIndex,
4861  uint32_t frameInUseCount,
4862  VmaAllocationRequest* pAllocationRequest);
4863 
4864  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4865 
4866  virtual VkResult CheckCorruption(const void* pBlockData);
4867 
4868  virtual void Alloc(
4869  const VmaAllocationRequest& request,
4870  VmaSuballocationType type,
4871  VkDeviceSize allocSize,
4872  bool upperAddress,
4873  VmaAllocation hAllocation);
4874 
4875  virtual void Free(const VmaAllocation allocation);
4876  virtual void FreeAtOffset(VkDeviceSize offset);
4877 
4878  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
4879 
4880 private:
4881  uint32_t m_FreeCount;
4882  VkDeviceSize m_SumFreeSize;
4883  VmaSuballocationList m_Suballocations;
4884  // Suballocations that are free and have size greater than certain threshold.
4885  // Sorted by size, ascending.
4886  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4887 
4888  bool ValidateFreeSuballocationList() const;
4889 
4890  // Checks if a requested suballocation with the given parameters can be placed in given suballocItem.
4891  // If yes, fills pOffset and returns true. If no, returns false.
4892  bool CheckAllocation(
4893  uint32_t currentFrameIndex,
4894  uint32_t frameInUseCount,
4895  VkDeviceSize bufferImageGranularity,
4896  VkDeviceSize allocSize,
4897  VkDeviceSize allocAlignment,
4898  VmaSuballocationType allocType,
4899  VmaSuballocationList::const_iterator suballocItem,
4900  bool canMakeOtherLost,
4901  VkDeviceSize* pOffset,
4902  size_t* itemsToMakeLostCount,
4903  VkDeviceSize* pSumFreeSize,
4904  VkDeviceSize* pSumItemSize) const;
4905  // Given a free suballocation, merges it with the following one, which must also be free.
4906  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4907  // Releases given suballocation, making it free.
4908  // Merges it with adjacent free suballocations if applicable.
4909  // Returns iterator to new free suballocation at this place.
4910  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4911  // Given a free suballocation, inserts it into the sorted list
4912  // m_FreeSuballocationsBySize if it's large enough to qualify.
4913  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4914  // Given a free suballocation, removes it from the sorted list
4915  // m_FreeSuballocationsBySize if it was registered there.
4916  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4917 };
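// m_FreeSuballocationsBySize above is sorted by size, ascending, which turns
// best-fit selection into a binary search: find the first free range whose
// size is not less than the request. A standalone sketch of the idea with a
// hypothetical helper, not the library's actual search code:
#include <vector>
#include <algorithm>
#include <cstdint>

// Returns index of the smallest free range that fits, or SIZE_MAX if none does.
static inline size_t VmaBestFitSketch(
    const std::vector<uint64_t>& freeSizesAscending, uint64_t allocSize)
{
    std::vector<uint64_t>::const_iterator it = std::lower_bound(
        freeSizesAscending.begin(), freeSizesAscending.end(), allocSize);
    return it != freeSizesAscending.end() ?
        (size_t)(it - freeSizesAscending.begin()) : SIZE_MAX;
}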
4918 
4919 /*
4920 Allocations and their references in internal data structure look like this:
4921 
4922 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4923 
4924  0 +-------+
4925  | |
4926  | |
4927  | |
4928  +-------+
4929  | Alloc | 1st[m_1stNullItemsBeginCount]
4930  +-------+
4931  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4932  +-------+
4933  | ... |
4934  +-------+
4935  | Alloc | 1st[1st.size() - 1]
4936  +-------+
4937  | |
4938  | |
4939  | |
4940 GetSize() +-------+
4941 
4942 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4943 
4944  0 +-------+
4945  | Alloc | 2nd[0]
4946  +-------+
4947  | Alloc | 2nd[1]
4948  +-------+
4949  | ... |
4950  +-------+
4951  | Alloc | 2nd[2nd.size() - 1]
4952  +-------+
4953  | |
4954  | |
4955  | |
4956  +-------+
4957  | Alloc | 1st[m_1stNullItemsBeginCount]
4958  +-------+
4959  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4960  +-------+
4961  | ... |
4962  +-------+
4963  | Alloc | 1st[1st.size() - 1]
4964  +-------+
4965  | |
4966 GetSize() +-------+
4967 
4968 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4969 
4970  0 +-------+
4971  | |
4972  | |
4973  | |
4974  +-------+
4975  | Alloc | 1st[m_1stNullItemsBeginCount]
4976  +-------+
4977  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4978  +-------+
4979  | ... |
4980  +-------+
4981  | Alloc | 1st[1st.size() - 1]
4982  +-------+
4983  | |
4984  | |
4985  | |
4986  +-------+
4987  | Alloc | 2nd[2nd.size() - 1]
4988  +-------+
4989  | ... |
4990  +-------+
4991  | Alloc | 2nd[1]
4992  +-------+
4993  | Alloc | 2nd[0]
4994 GetSize() +-------+
4995 
4996 */
4997 class VmaBlockMetadata_Linear : public VmaBlockMetadata
4998 {
4999  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5000 public:
5001  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5002  virtual ~VmaBlockMetadata_Linear();
5003  virtual void Init(VkDeviceSize size);
5004 
5005  virtual bool Validate() const;
5006  virtual size_t GetAllocationCount() const;
5007  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5008  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5009  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5010 
5011  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5012  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5013 
5014 #if VMA_STATS_STRING_ENABLED
5015  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5016 #endif
5017 
5018  virtual bool CreateAllocationRequest(
5019  uint32_t currentFrameIndex,
5020  uint32_t frameInUseCount,
5021  VkDeviceSize bufferImageGranularity,
5022  VkDeviceSize allocSize,
5023  VkDeviceSize allocAlignment,
5024  bool upperAddress,
5025  VmaSuballocationType allocType,
5026  bool canMakeOtherLost,
5027  uint32_t strategy,
5028  VmaAllocationRequest* pAllocationRequest);
5029 
5030  virtual bool MakeRequestedAllocationsLost(
5031  uint32_t currentFrameIndex,
5032  uint32_t frameInUseCount,
5033  VmaAllocationRequest* pAllocationRequest);
5034 
5035  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5036 
5037  virtual VkResult CheckCorruption(const void* pBlockData);
5038 
5039  virtual void Alloc(
5040  const VmaAllocationRequest& request,
5041  VmaSuballocationType type,
5042  VkDeviceSize allocSize,
5043  bool upperAddress,
5044  VmaAllocation hAllocation);
5045 
5046  virtual void Free(const VmaAllocation allocation);
5047  virtual void FreeAtOffset(VkDeviceSize offset);
5048 
5049 private:
5050  /*
5051  There are two suballocation vectors, used in ping-pong way.
5052  The one with index m_1stVectorIndex is called 1st.
5053  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5054  2nd can be non-empty only when 1st is not empty.
5055  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5056  */
5057  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5058 
5059  enum SECOND_VECTOR_MODE
5060  {
5061  SECOND_VECTOR_EMPTY,
5062  /*
5063  Suballocations in 2nd vector are created later than the ones in 1st, but they
5064  all have smaller offsets.
5065  */
5066  SECOND_VECTOR_RING_BUFFER,
5067  /*
5068  Suballocations in 2nd vector are upper side of double stack.
5069  They all have offsets higher than those in 1st vector.
5070  Top of this stack means smaller offsets, but higher indices in this vector.
5071  */
5072  SECOND_VECTOR_DOUBLE_STACK,
5073  };
5074 
5075  VkDeviceSize m_SumFreeSize;
5076  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5077  uint32_t m_1stVectorIndex;
5078  SECOND_VECTOR_MODE m_2ndVectorMode;
5079 
5080  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5081  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5082  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5083  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5084 
5085  // Number of items in 1st vector with hAllocation = null at the beginning.
5086  size_t m_1stNullItemsBeginCount;
5087  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5088  size_t m_1stNullItemsMiddleCount;
5089  // Number of items in 2nd vector with hAllocation = null.
5090  size_t m_2ndNullItemsCount;
5091 
5092  bool ShouldCompact1st() const;
5093  void CleanupAfterFree();
5094 };
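// A minimal standalone sketch of the ping-pong selection above: a single
// index bit decides which physical vector currently plays the "1st" role, and
// flipping that bit (index ^ 1) swaps the roles in O(1) without moving any
// elements. Names here are illustrative only.
#include <vector>
#include <cstdint>

struct VmaPingPongSketch
{
    std::vector<int> m_Vec0, m_Vec1;
    uint32_t m_1stIndex = 0;

    std::vector<int>& First()  { return m_1stIndex ? m_Vec1 : m_Vec0; }
    std::vector<int>& Second() { return m_1stIndex ? m_Vec0 : m_Vec1; }

    void SwapRoles() { m_1stIndex ^= 1; } // O(1): only the selector flips.
};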
5095 
5096 /*
5097 - GetSize() is the original size of allocated memory block.
5098 - m_UsableSize is this size aligned down to a power of two.
5099  All allocations and calculations happen relative to m_UsableSize.
5100 - GetUnusableSize() is the difference between them.
5101  It is reported as a separate, unused range, not available for allocations.
5102 
5103 Node at level 0 has size = m_UsableSize.
5104 Each subsequent level contains nodes half the size of those on the previous level.
5105 m_LevelCount is the maximum number of levels to use in the current object.
5106 */
5107 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5108 {
5109  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5110 public:
5111  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5112  virtual ~VmaBlockMetadata_Buddy();
5113  virtual void Init(VkDeviceSize size);
5114 
5115  virtual bool Validate() const;
5116  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5117  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5118  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5119  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5120 
5121  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5122  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5123 
5124 #if VMA_STATS_STRING_ENABLED
5125  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5126 #endif
5127 
5128  virtual bool CreateAllocationRequest(
5129  uint32_t currentFrameIndex,
5130  uint32_t frameInUseCount,
5131  VkDeviceSize bufferImageGranularity,
5132  VkDeviceSize allocSize,
5133  VkDeviceSize allocAlignment,
5134  bool upperAddress,
5135  VmaSuballocationType allocType,
5136  bool canMakeOtherLost,
5137  uint32_t strategy,
5138  VmaAllocationRequest* pAllocationRequest);
5139 
5140  virtual bool MakeRequestedAllocationsLost(
5141  uint32_t currentFrameIndex,
5142  uint32_t frameInUseCount,
5143  VmaAllocationRequest* pAllocationRequest);
5144 
5145  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5146 
5147  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5148 
5149  virtual void Alloc(
5150  const VmaAllocationRequest& request,
5151  VmaSuballocationType type,
5152  VkDeviceSize allocSize,
5153  bool upperAddress,
5154  VmaAllocation hAllocation);
5155 
5156  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5157  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5158 
5159 private:
5160  static const VkDeviceSize MIN_NODE_SIZE = 32;
5161  static const size_t MAX_LEVELS = 30;
5162 
5163  struct ValidationContext
5164  {
5165  size_t calculatedAllocationCount;
5166  size_t calculatedFreeCount;
5167  VkDeviceSize calculatedSumFreeSize;
5168 
5169  ValidationContext() :
5170  calculatedAllocationCount(0),
5171  calculatedFreeCount(0),
5172  calculatedSumFreeSize(0) { }
5173  };
5174 
5175  struct Node
5176  {
5177  VkDeviceSize offset;
5178  enum TYPE
5179  {
5180  TYPE_FREE,
5181  TYPE_ALLOCATION,
5182  TYPE_SPLIT,
5183  TYPE_COUNT
5184  } type;
5185  Node* parent;
5186  Node* buddy;
5187 
5188  union
5189  {
5190  struct
5191  {
5192  Node* prev;
5193  Node* next;
5194  } free;
5195  struct
5196  {
5197  VmaAllocation alloc;
5198  } allocation;
5199  struct
5200  {
5201  Node* leftChild;
5202  } split;
5203  };
5204  };
5205 
5206  // Size of the memory block aligned down to a power of two.
5207  VkDeviceSize m_UsableSize;
5208  uint32_t m_LevelCount;
5209 
5210  Node* m_Root;
5211  struct {
5212  Node* front;
5213  Node* back;
5214  } m_FreeList[MAX_LEVELS];
5215  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5216  size_t m_AllocationCount;
5217  // Number of nodes in the tree with type == TYPE_FREE.
5218  size_t m_FreeCount;
5219  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5220  VkDeviceSize m_SumFreeSize;
5221 
5222  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5223  void DeleteNode(Node* node);
5224  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5225  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5226  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5227  // Alloc passed just for validation. Can be null.
5228  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5229  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5230  // Adds node to the front of FreeList at given level.
5231  // node->type must be FREE.
5232  // node->free.prev, next can be undefined.
5233  void AddToFreeListFront(uint32_t level, Node* node);
5234  // Removes node from FreeList at given level.
5235  // node->type must be FREE.
5236  // node->free.prev, next stay untouched.
5237  void RemoveFromFreeList(uint32_t level, Node* node);
5238 
5239 #if VMA_STATS_STRING_ENABLED
5240  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5241 #endif
5242 };
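// Level arithmetic in the buddy metadata above: LevelToNodeSize(level) halves
// the node size at each step, node size == m_UsableSize >> level. The inverse
// mapping - the deepest level whose node still fits a requested size - can be
// sketched as below. This is a hypothetical stand-in, not the library's actual
// AllocSizeToLevel().
#include <cstdint>

static inline uint32_t VmaAllocSizeToLevelSketch(
    uint64_t allocSize, uint64_t usableSize, uint32_t levelCount)
{
    uint32_t level = 0;
    // Descend while the child node (half the current size) still fits.
    while(level + 1 < levelCount && (usableSize >> (level + 1)) >= allocSize)
    {
        ++level;
    }
    return level;
}
// E.g. usableSize = 1024, allocSize = 100: nodes of 512, 256 and 128 all fit
// but 64 does not, so the allocation lands at level 3 (node size 128).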
5243 
5244 /*
5245 Represents a single block of device memory (`VkDeviceMemory`) with all the
5246 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5247 
5248 Thread-safety: This class must be externally synchronized.
5249 */
5250 class VmaDeviceMemoryBlock
5251 {
5252  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5253 public:
5254  VmaBlockMetadata* m_pMetadata;
5255 
5256  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5257 
5258  ~VmaDeviceMemoryBlock()
5259  {
5260  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5261  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5262  }
5263 
5264  // Always call after construction.
5265  void Init(
5266  VmaAllocator hAllocator,
5267  uint32_t newMemoryTypeIndex,
5268  VkDeviceMemory newMemory,
5269  VkDeviceSize newSize,
5270  uint32_t id,
5271  uint32_t algorithm);
5272  // Always call before destruction.
5273  void Destroy(VmaAllocator allocator);
5274 
5275  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5276  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5277  uint32_t GetId() const { return m_Id; }
5278  void* GetMappedData() const { return m_pMappedData; }
5279 
5280  // Validates all data structures inside this object. If not valid, returns false.
5281  bool Validate() const;
5282 
5283  VkResult CheckCorruption(VmaAllocator hAllocator);
5284 
5285  // ppData can be null.
5286  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5287  void Unmap(VmaAllocator hAllocator, uint32_t count);
5288 
5289  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5290  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5291 
5292  VkResult BindBufferMemory(
5293  const VmaAllocator hAllocator,
5294  const VmaAllocation hAllocation,
5295  VkBuffer hBuffer);
5296  VkResult BindImageMemory(
5297  const VmaAllocator hAllocator,
5298  const VmaAllocation hAllocation,
5299  VkImage hImage);
5300 
5301 private:
5302  uint32_t m_MemoryTypeIndex;
5303  uint32_t m_Id;
5304  VkDeviceMemory m_hMemory;
5305 
5306  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5307  // Also protects m_MapCount, m_pMappedData.
5308  VMA_MUTEX m_Mutex;
5309  uint32_t m_MapCount;
5310  void* m_pMappedData;
5311 };
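// A standalone sketch of the reference-counted mapping that Map()/Unmap()
// above implement: only the 0 -> 1 transition maps and only the 1 -> 0
// transition unmaps, so multiple allocations in one block share a single
// vkMapMemory. The backend call is stubbed here; in the real code it is
// vkMapMemory/vkUnmapMemory guarded by m_Mutex, and Map/Unmap additionally
// take a count to add or subtract. Illustrative only.
#include <cstdint>
#include <cassert>

struct VmaMappingRefCountSketch
{
    uint32_t m_MapCount = 0;
    void* m_pMappedData = nullptr;

    void* Map()
    {
        if(m_MapCount++ == 0)
        {
            m_pMappedData = BackendMap(); // Stand-in for vkMapMemory.
        }
        return m_pMappedData;
    }
    void Unmap()
    {
        assert(m_MapCount > 0 && "Unmapping a block that is not mapped.");
        if(--m_MapCount == 0)
        {
            m_pMappedData = nullptr; // Stand-in for vkUnmapMemory.
        }
    }

private:
    static void* BackendMap() { static char fake[256]; return fake; }
};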
5312 
5313 struct VmaPointerLess
5314 {
5315  bool operator()(const void* lhs, const void* rhs) const
5316  {
5317  return lhs < rhs;
5318  }
5319 };
5320 
5321 class VmaDefragmentator;
5322 
5323 /*
5324 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5325 Vulkan memory type.
5326 
5327 Synchronized internally with a mutex.
5328 */
5329 struct VmaBlockVector
5330 {
5331  VMA_CLASS_NO_COPY(VmaBlockVector)
5332 public:
5333  VmaBlockVector(
5334  VmaAllocator hAllocator,
5335  uint32_t memoryTypeIndex,
5336  VkDeviceSize preferredBlockSize,
5337  size_t minBlockCount,
5338  size_t maxBlockCount,
5339  VkDeviceSize bufferImageGranularity,
5340  uint32_t frameInUseCount,
5341  bool isCustomPool,
5342  bool explicitBlockSize,
5343  uint32_t algorithm);
5344  ~VmaBlockVector();
5345 
5346  VkResult CreateMinBlocks();
5347 
5348  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5349  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5350  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5351  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5352  uint32_t GetAlgorithm() const { return m_Algorithm; }
5353 
5354  void GetPoolStats(VmaPoolStats* pStats);
5355 
5356  bool IsEmpty() const { return m_Blocks.empty(); }
5357  bool IsCorruptionDetectionEnabled() const;
5358 
5359  VkResult Allocate(
5360  VmaPool hCurrentPool,
5361  uint32_t currentFrameIndex,
5362  VkDeviceSize size,
5363  VkDeviceSize alignment,
5364  const VmaAllocationCreateInfo& createInfo,
5365  VmaSuballocationType suballocType,
5366  VmaAllocation* pAllocation);
5367 
5368  void Free(
5369  VmaAllocation hAllocation);
5370 
5371  // Adds statistics of this BlockVector to pStats.
5372  void AddStats(VmaStats* pStats);
5373 
5374 #if VMA_STATS_STRING_ENABLED
5375  void PrintDetailedMap(class VmaJsonWriter& json);
5376 #endif
5377 
5378  void MakePoolAllocationsLost(
5379  uint32_t currentFrameIndex,
5380  size_t* pLostAllocationCount);
5381  VkResult CheckCorruption();
5382 
5383  VmaDefragmentator* EnsureDefragmentator(
5384  VmaAllocator hAllocator,
5385  uint32_t currentFrameIndex);
5386 
5387  VkResult Defragment(
5388  VmaDefragmentationStats* pDefragmentationStats,
5389  VkDeviceSize& maxBytesToMove,
5390  uint32_t& maxAllocationsToMove);
5391 
5392  void DestroyDefragmentator();
5393 
5394 private:
5395  friend class VmaDefragmentator;
5396 
5397  const VmaAllocator m_hAllocator;
5398  const uint32_t m_MemoryTypeIndex;
5399  const VkDeviceSize m_PreferredBlockSize;
5400  const size_t m_MinBlockCount;
5401  const size_t m_MaxBlockCount;
5402  const VkDeviceSize m_BufferImageGranularity;
5403  const uint32_t m_FrameInUseCount;
5404  const bool m_IsCustomPool;
5405  const bool m_ExplicitBlockSize;
5406  const uint32_t m_Algorithm;
5407  bool m_HasEmptyBlock;
5408  VMA_MUTEX m_Mutex;
5409  // Incrementally sorted by sumFreeSize, ascending.
5410  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5411  /* There can be at most one block that is completely empty - a
5412  hysteresis to avoid the pessimistic case of alternating creation and destruction
5413  of a VkDeviceMemory. */
5414  VmaDefragmentator* m_pDefragmentator;
5415  uint32_t m_NextBlockId;
5416 
5417  VkDeviceSize CalcMaxBlockSize() const;
5418 
5419  // Finds and removes given block from vector.
5420  void Remove(VmaDeviceMemoryBlock* pBlock);
5421 
5422  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5423  // after this call.
5424  void IncrementallySortBlocks();
5425 
5426  // To be used only without CAN_MAKE_OTHER_LOST flag.
5427  VkResult AllocateFromBlock(
5428  VmaDeviceMemoryBlock* pBlock,
5429  VmaPool hCurrentPool,
5430  uint32_t currentFrameIndex,
5431  VkDeviceSize size,
5432  VkDeviceSize alignment,
5433  VmaAllocationCreateFlags allocFlags,
5434  void* pUserData,
5435  VmaSuballocationType suballocType,
5436  uint32_t strategy,
5437  VmaAllocation* pAllocation);
5438 
5439  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5440 };
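// IncrementallySortBlocks() above is documented to perform only a single
// sorting step. One plausible sketch of such a step over the blocks' sort key
// (sumFreeSize, ascending): repair at most one adjacent inversion per call, so
// repeated calls converge to sorted order while each call stays O(n). This is
// a hypothetical helper, not the library's actual implementation.
#include <vector>
#include <utility>
#include <cstdint>

static inline void VmaIncrementalSortStepSketch(std::vector<uint64_t>& sumFreeSizes)
{
    for(size_t i = 1; i < sumFreeSizes.size(); ++i)
    {
        if(sumFreeSizes[i - 1] > sumFreeSizes[i])
        {
            std::swap(sumFreeSizes[i - 1], sumFreeSizes[i]);
            return; // Single step: fix at most one out-of-order pair per call.
        }
    }
}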
5441 
5442 struct VmaPool_T
5443 {
5444  VMA_CLASS_NO_COPY(VmaPool_T)
5445 public:
5446  VmaBlockVector m_BlockVector;
5447 
5448  VmaPool_T(
5449  VmaAllocator hAllocator,
5450  const VmaPoolCreateInfo& createInfo,
5451  VkDeviceSize preferredBlockSize);
5452  ~VmaPool_T();
5453 
5454  uint32_t GetId() const { return m_Id; }
5455  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5456 
5457 #if VMA_STATS_STRING_ENABLED
5458  //void PrintDetailedMap(class VmaStringBuilder& sb);
5459 #endif
5460 
5461 private:
5462  uint32_t m_Id;
5463 };
5464 
5465 class VmaDefragmentator
5466 {
5467  VMA_CLASS_NO_COPY(VmaDefragmentator)
5468 private:
5469  const VmaAllocator m_hAllocator;
5470  VmaBlockVector* const m_pBlockVector;
5471  uint32_t m_CurrentFrameIndex;
5472  VkDeviceSize m_BytesMoved;
5473  uint32_t m_AllocationsMoved;
5474 
5475  struct AllocationInfo
5476  {
5477  VmaAllocation m_hAllocation;
5478  VkBool32* m_pChanged;
5479 
5480  AllocationInfo() :
5481  m_hAllocation(VK_NULL_HANDLE),
5482  m_pChanged(VMA_NULL)
5483  {
5484  }
5485  };
5486 
5487  struct AllocationInfoSizeGreater
5488  {
5489  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5490  {
5491  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5492  }
5493  };
5494 
5495  // Used between AddAllocation and Defragment.
5496  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5497 
5498  struct BlockInfo
5499  {
5500  VmaDeviceMemoryBlock* m_pBlock;
5501  bool m_HasNonMovableAllocations;
5502  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5503 
5504  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5505  m_pBlock(VMA_NULL),
5506  m_HasNonMovableAllocations(true),
5507  m_Allocations(pAllocationCallbacks),
5508  m_pMappedDataForDefragmentation(VMA_NULL)
5509  {
5510  }
5511 
5512  void CalcHasNonMovableAllocations()
5513  {
5514  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5515  const size_t defragmentAllocCount = m_Allocations.size();
5516  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5517  }
5518 
5519  void SortAllocationsBySizeDescecnding()
5520  {
5521  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5522  }
5523 
5524  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
5525  void Unmap(VmaAllocator hAllocator);
5526 
5527  private:
5528  // Not null if mapped for defragmentation only, not originally mapped.
5529  void* m_pMappedDataForDefragmentation;
5530  };
5531 
5532  struct BlockPointerLess
5533  {
5534  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5535  {
5536  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5537  }
5538  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5539  {
5540  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5541  }
5542  };
5543 
5544  // 1. Blocks with some non-movable allocations go first.
5545  // 2. Blocks with smaller sumFreeSize go first.
5546  struct BlockInfoCompareMoveDestination
5547  {
5548  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5549  {
5550  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5551  {
5552  return true;
5553  }
5554  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
5555  {
5556  return false;
5557  }
5558  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
5559  {
5560  return true;
5561  }
5562  return false;
5563  }
5564  };
5565 
5566  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
5567  BlockInfoVector m_Blocks;
5568 
5569  VkResult DefragmentRound(
5570  VkDeviceSize maxBytesToMove,
5571  uint32_t maxAllocationsToMove);
5572 
5573  static bool MoveMakesSense(
5574  size_t dstBlockIndex, VkDeviceSize dstOffset,
5575  size_t srcBlockIndex, VkDeviceSize srcOffset);
5576 
5577 public:
5578  VmaDefragmentator(
5579  VmaAllocator hAllocator,
5580  VmaBlockVector* pBlockVector,
5581  uint32_t currentFrameIndex);
5582 
5583  ~VmaDefragmentator();
5584 
5585  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5586  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5587 
5588  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5589 
5590  VkResult Defragment(
5591  VkDeviceSize maxBytesToMove,
5592  uint32_t maxAllocationsToMove);
5593 };
5594 
5595 #if VMA_RECORDING_ENABLED
5596 
5597 class VmaRecorder
5598 {
5599 public:
5600  VmaRecorder();
5601  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
5602  void WriteConfiguration(
5603  const VkPhysicalDeviceProperties& devProps,
5604  const VkPhysicalDeviceMemoryProperties& memProps,
5605  bool dedicatedAllocationExtensionEnabled);
5606  ~VmaRecorder();
5607 
5608  void RecordCreateAllocator(uint32_t frameIndex);
5609  void RecordDestroyAllocator(uint32_t frameIndex);
5610  void RecordCreatePool(uint32_t frameIndex,
5611  const VmaPoolCreateInfo& createInfo,
5612  VmaPool pool);
5613  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
5614  void RecordAllocateMemory(uint32_t frameIndex,
5615  const VkMemoryRequirements& vkMemReq,
5616  const VmaAllocationCreateInfo& createInfo,
5617  VmaAllocation allocation);
5618  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
5619  const VkMemoryRequirements& vkMemReq,
5620  bool requiresDedicatedAllocation,
5621  bool prefersDedicatedAllocation,
5622  const VmaAllocationCreateInfo& createInfo,
5623  VmaAllocation allocation);
5624  void RecordAllocateMemoryForImage(uint32_t frameIndex,
5625  const VkMemoryRequirements& vkMemReq,
5626  bool requiresDedicatedAllocation,
5627  bool prefersDedicatedAllocation,
5628  const VmaAllocationCreateInfo& createInfo,
5629  VmaAllocation allocation);
5630  void RecordFreeMemory(uint32_t frameIndex,
5631  VmaAllocation allocation);
5632  void RecordResizeAllocation(
5633  uint32_t frameIndex,
5634  VmaAllocation allocation,
5635  VkDeviceSize newSize);
5636  void RecordSetAllocationUserData(uint32_t frameIndex,
5637  VmaAllocation allocation,
5638  const void* pUserData);
5639  void RecordCreateLostAllocation(uint32_t frameIndex,
5640  VmaAllocation allocation);
5641  void RecordMapMemory(uint32_t frameIndex,
5642  VmaAllocation allocation);
5643  void RecordUnmapMemory(uint32_t frameIndex,
5644  VmaAllocation allocation);
5645  void RecordFlushAllocation(uint32_t frameIndex,
5646  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5647  void RecordInvalidateAllocation(uint32_t frameIndex,
5648  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5649  void RecordCreateBuffer(uint32_t frameIndex,
5650  const VkBufferCreateInfo& bufCreateInfo,
5651  const VmaAllocationCreateInfo& allocCreateInfo,
5652  VmaAllocation allocation);
5653  void RecordCreateImage(uint32_t frameIndex,
5654  const VkImageCreateInfo& imageCreateInfo,
5655  const VmaAllocationCreateInfo& allocCreateInfo,
5656  VmaAllocation allocation);
5657  void RecordDestroyBuffer(uint32_t frameIndex,
5658  VmaAllocation allocation);
5659  void RecordDestroyImage(uint32_t frameIndex,
5660  VmaAllocation allocation);
5661  void RecordTouchAllocation(uint32_t frameIndex,
5662  VmaAllocation allocation);
5663  void RecordGetAllocationInfo(uint32_t frameIndex,
5664  VmaAllocation allocation);
5665  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
5666  VmaPool pool);
5667 
5668 private:
5669  struct CallParams
5670  {
5671  uint32_t threadId;
5672  double time;
5673  };
5674 
5675  class UserDataString
5676  {
5677  public:
5678  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
5679  const char* GetString() const { return m_Str; }
5680 
5681  private:
5682  char m_PtrStr[17];
5683  const char* m_Str;
5684  };
5685 
5686  bool m_UseMutex;
5687  VmaRecordFlags m_Flags;
5688  FILE* m_File;
5689  VMA_MUTEX m_FileMutex;
5690  int64_t m_Freq;
5691  int64_t m_StartCounter;
5692 
5693  void GetBasicParams(CallParams& outParams);
5694  void Flush();
5695 };
5696 
5697 #endif // #if VMA_RECORDING_ENABLED
5698 
5699 // Main allocator object.
5700 struct VmaAllocator_T
5701 {
5702  VMA_CLASS_NO_COPY(VmaAllocator_T)
5703 public:
5704  bool m_UseMutex;
5705  bool m_UseKhrDedicatedAllocation;
5706  VkDevice m_hDevice;
5707  bool m_AllocationCallbacksSpecified;
5708  VkAllocationCallbacks m_AllocationCallbacks;
5709  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5710 
5711  // Number of bytes remaining under the heap size limit, or VK_WHOLE_SIZE if there is no limit for that heap.
5712  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5713  VMA_MUTEX m_HeapSizeLimitMutex;
5714 
5715  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5716  VkPhysicalDeviceMemoryProperties m_MemProps;
5717 
5718  // Default pools.
5719  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5720 
5721  // Each vector is sorted by memory (handle value).
5722  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5723  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5724  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5725 
5726  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5727  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5728  ~VmaAllocator_T();
5729 
5730  const VkAllocationCallbacks* GetAllocationCallbacks() const
5731  {
5732  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5733  }
5734  const VmaVulkanFunctions& GetVulkanFunctions() const
5735  {
5736  return m_VulkanFunctions;
5737  }
5738 
5739  VkDeviceSize GetBufferImageGranularity() const
5740  {
5741  return VMA_MAX(
5742  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5743  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5744  }
5745 
5746  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5747  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5748 
5749  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5750  {
5751  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5752  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5753  }
5754  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5755  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5756  {
5757  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5758  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5759  }
5760  // Minimum alignment for all allocations in specific memory type.
5761  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5762  {
5763  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5764  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5765  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5766  }
5767 
5768  bool IsIntegratedGpu() const
5769  {
5770  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5771  }
5772 
5773 #if VMA_RECORDING_ENABLED
5774  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5775 #endif
5776 
5777  void GetBufferMemoryRequirements(
5778  VkBuffer hBuffer,
5779  VkMemoryRequirements& memReq,
5780  bool& requiresDedicatedAllocation,
5781  bool& prefersDedicatedAllocation) const;
5782  void GetImageMemoryRequirements(
5783  VkImage hImage,
5784  VkMemoryRequirements& memReq,
5785  bool& requiresDedicatedAllocation,
5786  bool& prefersDedicatedAllocation) const;
5787 
5788  // Main allocation function.
5789  VkResult AllocateMemory(
5790  const VkMemoryRequirements& vkMemReq,
5791  bool requiresDedicatedAllocation,
5792  bool prefersDedicatedAllocation,
5793  VkBuffer dedicatedBuffer,
5794  VkImage dedicatedImage,
5795  const VmaAllocationCreateInfo& createInfo,
5796  VmaSuballocationType suballocType,
5797  VmaAllocation* pAllocation);
5798 
5799  // Main deallocation function.
5800  void FreeMemory(const VmaAllocation allocation);
5801 
5802  VkResult ResizeAllocation(
5803  const VmaAllocation alloc,
5804  VkDeviceSize newSize);
5805 
5806  void CalculateStats(VmaStats* pStats);
5807 
5808 #if VMA_STATS_STRING_ENABLED
5809  void PrintDetailedMap(class VmaJsonWriter& json);
5810 #endif
5811 
5812  VkResult Defragment(
5813  VmaAllocation* pAllocations,
5814  size_t allocationCount,
5815  VkBool32* pAllocationsChanged,
5816  const VmaDefragmentationInfo* pDefragmentationInfo,
5817  VmaDefragmentationStats* pDefragmentationStats);
5818 
5819  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5820  bool TouchAllocation(VmaAllocation hAllocation);
5821 
5822  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5823  void DestroyPool(VmaPool pool);
5824  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5825 
5826  void SetCurrentFrameIndex(uint32_t frameIndex);
5827  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5828 
5829  void MakePoolAllocationsLost(
5830  VmaPool hPool,
5831  size_t* pLostAllocationCount);
5832  VkResult CheckPoolCorruption(VmaPool hPool);
5833  VkResult CheckCorruption(uint32_t memoryTypeBits);
5834 
5835  void CreateLostAllocation(VmaAllocation* pAllocation);
5836 
5837  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5838  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5839 
5840  VkResult Map(VmaAllocation hAllocation, void** ppData);
5841  void Unmap(VmaAllocation hAllocation);
5842 
5843  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5844  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5845 
5846  void FlushOrInvalidateAllocation(
5847  VmaAllocation hAllocation,
5848  VkDeviceSize offset, VkDeviceSize size,
5849  VMA_CACHE_OPERATION op);
5850 
5851  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5852 
5853 private:
5854  VkDeviceSize m_PreferredLargeHeapBlockSize;
5855 
5856  VkPhysicalDevice m_PhysicalDevice;
5857  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5858 
5859  VMA_MUTEX m_PoolsMutex;
5860  // Protected by m_PoolsMutex. Sorted by pointer value.
5861  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5862  uint32_t m_NextPoolId;
5863 
5864  VmaVulkanFunctions m_VulkanFunctions;
5865 
5866 #if VMA_RECORDING_ENABLED
5867  VmaRecorder* m_pRecorder;
5868 #endif
5869 
5870  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5871 
5872  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5873 
5874  VkResult AllocateMemoryOfType(
5875  VkDeviceSize size,
5876  VkDeviceSize alignment,
5877  bool dedicatedAllocation,
5878  VkBuffer dedicatedBuffer,
5879  VkImage dedicatedImage,
5880  const VmaAllocationCreateInfo& createInfo,
5881  uint32_t memTypeIndex,
5882  VmaSuballocationType suballocType,
5883  VmaAllocation* pAllocation);
5884 
5885  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
5886  VkResult AllocateDedicatedMemory(
5887  VkDeviceSize size,
5888  VmaSuballocationType suballocType,
5889  uint32_t memTypeIndex,
5890  bool map,
5891  bool isUserDataString,
5892  void* pUserData,
5893  VkBuffer dedicatedBuffer,
5894  VkImage dedicatedImage,
5895  VmaAllocation* pAllocation);
5896 
5897  // Frees dedicated memory of given allocation: unregisters it and destroys its VkDeviceMemory.
5898  void FreeDedicatedMemory(VmaAllocation allocation);
5899 };
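// The bit test in IsMemoryTypeNonCoherent() above masks both host-property
// bits at once and then requires exactly HOST_VISIBLE: such memory needs
// explicit flush/invalidate. Restated standalone with the numeric bit values
// from the Vulkan specification:
#include <cstdint>

static inline bool VmaIsNonCoherentSketch(uint32_t propertyFlags)
{
    const uint32_t HOST_VISIBLE  = 0x00000002; // VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
    const uint32_t HOST_COHERENT = 0x00000004; // VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
    // Keep only the two bits of interest; require VISIBLE set and COHERENT clear.
    return (propertyFlags & (HOST_VISIBLE | HOST_COHERENT)) == HOST_VISIBLE;
}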
5900 
5902 // Memory allocation #2 after VmaAllocator_T definition
5903 
5904 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5905 {
5906  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5907 }
5908 
5909 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5910 {
5911  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5912 }
5913 
5914 template<typename T>
5915 static T* VmaAllocate(VmaAllocator hAllocator)
5916 {
5917  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5918 }
5919 
5920 template<typename T>
5921 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5922 {
5923  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5924 }
5925 
5926 template<typename T>
5927 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5928 {
5929  if(ptr != VMA_NULL)
5930  {
5931  ptr->~T();
5932  VmaFree(hAllocator, ptr);
5933  }
5934 }
5935 
5936 template<typename T>
5937 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5938 {
5939  if(ptr != VMA_NULL)
5940  {
5941  for(size_t i = count; i--; )
5942  ptr[i].~T();
5943  VmaFree(hAllocator, ptr);
5944  }
5945 }
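// The helpers above separate raw allocation from object lifetime:
// VmaAllocate<T>/VmaAllocateArray<T> return uninitialized storage, and
// vma_delete/vma_delete_array run destructors explicitly before freeing. A
// standalone sketch of the same pattern over plain malloc/free (hypothetical
// names; the library routes through VkAllocationCallbacks instead):
#include <cstdlib>
#include <new>

template<typename T>
static T* VmaAllocateAndConstructSketch()
{
    void* raw = std::malloc(sizeof(T));             // Raw storage, as VmaAllocate<T> returns.
    return raw != nullptr ? new(raw) T() : nullptr; // Placement-new constructs in place.
}

template<typename T>
static void VmaDestroyAndFreeSketch(T* ptr)
{
    if(ptr != nullptr)
    {
        ptr->~T();      // Explicit destructor call, as in vma_delete.
        std::free(ptr);
    }
}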
5946 
5948 // VmaStringBuilder
5949 
5950 #if VMA_STATS_STRING_ENABLED
5951 
5952 class VmaStringBuilder
5953 {
5954 public:
5955  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
5956  size_t GetLength() const { return m_Data.size(); }
5957  const char* GetData() const { return m_Data.data(); }
5958 
5959  void Add(char ch) { m_Data.push_back(ch); }
5960  void Add(const char* pStr);
5961  void AddNewLine() { Add('\n'); }
5962  void AddNumber(uint32_t num);
5963  void AddNumber(uint64_t num);
5964  void AddPointer(const void* ptr);
5965 
5966 private:
5967  VmaVector< char, VmaStlAllocator<char> > m_Data;
5968 };
5969 
5970 void VmaStringBuilder::Add(const char* pStr)
5971 {
5972  const size_t strLen = strlen(pStr);
5973  if(strLen > 0)
5974  {
5975  const size_t oldCount = m_Data.size();
5976  m_Data.resize(oldCount + strLen);
5977  memcpy(m_Data.data() + oldCount, pStr, strLen);
5978  }
5979 }
5980 
5981 void VmaStringBuilder::AddNumber(uint32_t num)
5982 {
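 // Max uint32_t is 4294967295: 10 digits plus the terminating null fit in buf[11].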
5983  char buf[11];
5984  VmaUint32ToStr(buf, sizeof(buf), num);
5985  Add(buf);
5986 }
5987 
5988 void VmaStringBuilder::AddNumber(uint64_t num)
5989 {
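 // Max uint64_t is 18446744073709551615: 20 digits plus the terminating null fit in buf[21].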
5990  char buf[21];
5991  VmaUint64ToStr(buf, sizeof(buf), num);
5992  Add(buf);
5993 }
5994 
5995 void VmaStringBuilder::AddPointer(const void* ptr)
5996 {
5997  char buf[21];
5998  VmaPtrToStr(buf, sizeof(buf), ptr);
5999  Add(buf);
6000 }
6001 
6002 #endif // #if VMA_STATS_STRING_ENABLED
6003 
6005 // VmaJsonWriter
6006 
6007 #if VMA_STATS_STRING_ENABLED
6008 
6009 class VmaJsonWriter
6010 {
6011  VMA_CLASS_NO_COPY(VmaJsonWriter)
6012 public:
6013  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6014  ~VmaJsonWriter();
6015 
6016  void BeginObject(bool singleLine = false);
6017  void EndObject();
6018 
6019  void BeginArray(bool singleLine = false);
6020  void EndArray();
6021 
6022  void WriteString(const char* pStr);
6023  void BeginString(const char* pStr = VMA_NULL);
6024  void ContinueString(const char* pStr);
6025  void ContinueString(uint32_t n);
6026  void ContinueString(uint64_t n);
6027  void ContinueString_Pointer(const void* ptr);
6028  void EndString(const char* pStr = VMA_NULL);
6029 
6030  void WriteNumber(uint32_t n);
6031  void WriteNumber(uint64_t n);
6032  void WriteBool(bool b);
6033  void WriteNull();
6034 
6035 private:
6036  static const char* const INDENT;
6037 
6038  enum COLLECTION_TYPE
6039  {
6040  COLLECTION_TYPE_OBJECT,
6041  COLLECTION_TYPE_ARRAY,
6042  };
6043  struct StackItem
6044  {
6045  COLLECTION_TYPE type;
6046  uint32_t valueCount;
6047  bool singleLineMode;
6048  };
6049 
6050  VmaStringBuilder& m_SB;
6051  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6052  bool m_InsideString;
6053 
6054  void BeginValue(bool isString);
6055  void WriteIndent(bool oneLess = false);
6056 };
6057 
6058 const char* const VmaJsonWriter::INDENT = " ";
6059 
6060 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6061  m_SB(sb),
6062  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6063  m_InsideString(false)
6064 {
6065 }
6066 
6067 VmaJsonWriter::~VmaJsonWriter()
6068 {
6069  VMA_ASSERT(!m_InsideString);
6070  VMA_ASSERT(m_Stack.empty());
6071 }
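// Typical call pattern for the writer, as a usage sketch (assuming `allocator`
// is a valid VmaAllocator; the real call sites are the statistics-printing
// functions elsewhere in this file):
//
//   VmaStringBuilder sb(allocator);
//   {
//       VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
//       json.BeginObject();
//       json.WriteString("TotalBytes");      // Key...
//       json.WriteNumber((uint64_t)1048576); // ...followed by its value.
//       json.EndObject();
//   } // Destructor asserts that every Begin* was matched by an End*.
//   // sb.GetData() now holds the serialized JSON object.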
6072 
6073 void VmaJsonWriter::BeginObject(bool singleLine)
6074 {
6075  VMA_ASSERT(!m_InsideString);
6076 
6077  BeginValue(false);
6078  m_SB.Add('{');
6079 
6080  StackItem item;
6081  item.type = COLLECTION_TYPE_OBJECT;
6082  item.valueCount = 0;
6083  item.singleLineMode = singleLine;
6084  m_Stack.push_back(item);
6085 }
6086 
6087 void VmaJsonWriter::EndObject()
6088 {
6089  VMA_ASSERT(!m_InsideString);
6090 
6091  WriteIndent(true);
6092  m_SB.Add('}');
6093 
6094  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6095  m_Stack.pop_back();
6096 }
6097 
6098 void VmaJsonWriter::BeginArray(bool singleLine)
6099 {
6100  VMA_ASSERT(!m_InsideString);
6101 
6102  BeginValue(false);
6103  m_SB.Add('[');
6104 
6105  StackItem item;
6106  item.type = COLLECTION_TYPE_ARRAY;
6107  item.valueCount = 0;
6108  item.singleLineMode = singleLine;
6109  m_Stack.push_back(item);
6110 }
6111 
6112 void VmaJsonWriter::EndArray()
6113 {
6114  VMA_ASSERT(!m_InsideString);
6115 
6116  WriteIndent(true);
6117  m_SB.Add(']');
6118 
6119  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6120  m_Stack.pop_back();
6121 }
6122 
6123 void VmaJsonWriter::WriteString(const char* pStr)
6124 {
6125  BeginString(pStr);
6126  EndString();
6127 }
6128 
6129 void VmaJsonWriter::BeginString(const char* pStr)
6130 {
6131  VMA_ASSERT(!m_InsideString);
6132 
6133  BeginValue(true);
6134  m_SB.Add('"');
6135  m_InsideString = true;
6136  if(pStr != VMA_NULL && pStr[0] != '\0')
6137  {
6138  ContinueString(pStr);
6139  }
6140 }
6141 
6142 void VmaJsonWriter::ContinueString(const char* pStr)
6143 {
6144  VMA_ASSERT(m_InsideString);
6145 
6146  const size_t strLen = strlen(pStr);
6147  for(size_t i = 0; i < strLen; ++i)
6148  {
6149  char ch = pStr[i];
6150  if(ch == '\\')
6151  {
6152  m_SB.Add("\\\\");
6153  }
6154  else if(ch == '"')
6155  {
6156  m_SB.Add("\\\"");
6157  }
6158  else if(ch >= 32)
6159  {
6160  m_SB.Add(ch);
6161  }
6162  else switch(ch)
6163  {
6164  case '\b':
6165  m_SB.Add("\\b");
6166  break;
6167  case '\f':
6168  m_SB.Add("\\f");
6169  break;
6170  case '\n':
6171  m_SB.Add("\\n");
6172  break;
6173  case '\r':
6174  m_SB.Add("\\r");
6175  break;
6176  case '\t':
6177  m_SB.Add("\\t");
6178  break;
6179  default:
6180  VMA_ASSERT(0 && "Character not currently supported.");
6181  break;
6182  }
6183  }
6184 }
6185 
6186 void VmaJsonWriter::ContinueString(uint32_t n)
6187 {
6188  VMA_ASSERT(m_InsideString);
6189  m_SB.AddNumber(n);
6190 }
6191 
6192 void VmaJsonWriter::ContinueString(uint64_t n)
6193 {
6194  VMA_ASSERT(m_InsideString);
6195  m_SB.AddNumber(n);
6196 }
6197 
6198 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6199 {
6200  VMA_ASSERT(m_InsideString);
6201  m_SB.AddPointer(ptr);
6202 }
6203 
6204 void VmaJsonWriter::EndString(const char* pStr)
6205 {
6206  VMA_ASSERT(m_InsideString);
6207  if(pStr != VMA_NULL && pStr[0] != '\0')
6208  {
6209  ContinueString(pStr);
6210  }
6211  m_SB.Add('"');
6212  m_InsideString = false;
6213 }
6214 
6215 void VmaJsonWriter::WriteNumber(uint32_t n)
6216 {
6217  VMA_ASSERT(!m_InsideString);
6218  BeginValue(false);
6219  m_SB.AddNumber(n);
6220 }
6221 
6222 void VmaJsonWriter::WriteNumber(uint64_t n)
6223 {
6224  VMA_ASSERT(!m_InsideString);
6225  BeginValue(false);
6226  m_SB.AddNumber(n);
6227 }
6228 
6229 void VmaJsonWriter::WriteBool(bool b)
6230 {
6231  VMA_ASSERT(!m_InsideString);
6232  BeginValue(false);
6233  m_SB.Add(b ? "true" : "false");
6234 }
6235 
6236 void VmaJsonWriter::WriteNull()
6237 {
6238  VMA_ASSERT(!m_InsideString);
6239  BeginValue(false);
6240  m_SB.Add("null");
6241 }
6242 
6243 void VmaJsonWriter::BeginValue(bool isString)
6244 {
6245  if(!m_Stack.empty())
6246  {
6247  StackItem& currItem = m_Stack.back();
6248  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6249  currItem.valueCount % 2 == 0)
6250  {
6251  VMA_ASSERT(isString);
6252  }
6253 
6254  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6255  currItem.valueCount % 2 != 0)
6256  {
6257  m_SB.Add(": ");
6258  }
6259  else if(currItem.valueCount > 0)
6260  {
6261  m_SB.Add(", ");
6262  WriteIndent();
6263  }
6264  else
6265  {
6266  WriteIndent();
6267  }
6268  ++currItem.valueCount;
6269  }
6270 }
6271 
6272 void VmaJsonWriter::WriteIndent(bool oneLess)
6273 {
6274  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6275  {
6276  m_SB.AddNewLine();
6277 
6278  size_t count = m_Stack.size();
6279  if(count > 0 && oneLess)
6280  {
6281  --count;
6282  }
6283  for(size_t i = 0; i < count; ++i)
6284  {
6285  m_SB.Add(INDENT);
6286  }
6287  }
6288 }
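// Illustrative usage sketch (not part of the library): emitting {"Name": 1}.
// Inside an object, values alternate key/value; BeginValue() above asserts that
// every even-indexed value is a string (a key) and inserts ": " or ", " itself.
//
//   VmaStringBuilder sb(allocator);
//   VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
//   json.BeginObject();
//   json.WriteString("Name");
//   json.WriteNumber(1u);
//   json.EndObject(); // sb now holds: {\n  "Name": 1\n}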
6289 
6290 #endif // #if VMA_STATS_STRING_ENABLED
6291 
6292 ////////////////////////////////////////////////////////////////////////////////
6293 
6294 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6295 {
6296  if(IsUserDataString())
6297  {
6298  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6299 
6300  FreeUserDataString(hAllocator);
6301 
6302  if(pUserData != VMA_NULL)
6303  {
6304  const char* const newStrSrc = (char*)pUserData;
6305  const size_t newStrLen = strlen(newStrSrc);
6306  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6307  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6308  m_pUserData = newStrDst;
6309  }
6310  }
6311  else
6312  {
6313  m_pUserData = pUserData;
6314  }
6315 }
6316 
6317 void VmaAllocation_T::ChangeBlockAllocation(
6318  VmaAllocator hAllocator,
6319  VmaDeviceMemoryBlock* block,
6320  VkDeviceSize offset)
6321 {
6322  VMA_ASSERT(block != VMA_NULL);
6323  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6324 
6325  // Move mapping reference counter from old block to new block.
6326  if(block != m_BlockAllocation.m_Block)
6327  {
6328  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6329  if(IsPersistentMap())
6330  ++mapRefCount;
6331  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6332  block->Map(hAllocator, mapRefCount, VMA_NULL);
6333  }
6334 
6335  m_BlockAllocation.m_Block = block;
6336  m_BlockAllocation.m_Offset = offset;
6337 }
6338 
6339 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
6340 {
6341  VMA_ASSERT(newSize > 0);
6342  m_Size = newSize;
6343 }
6344 
6345 VkDeviceSize VmaAllocation_T::GetOffset() const
6346 {
6347  switch(m_Type)
6348  {
6349  case ALLOCATION_TYPE_BLOCK:
6350  return m_BlockAllocation.m_Offset;
6351  case ALLOCATION_TYPE_DEDICATED:
6352  return 0;
6353  default:
6354  VMA_ASSERT(0);
6355  return 0;
6356  }
6357 }
6358 
6359 VkDeviceMemory VmaAllocation_T::GetMemory() const
6360 {
6361  switch(m_Type)
6362  {
6363  case ALLOCATION_TYPE_BLOCK:
6364  return m_BlockAllocation.m_Block->GetDeviceMemory();
6365  case ALLOCATION_TYPE_DEDICATED:
6366  return m_DedicatedAllocation.m_hMemory;
6367  default:
6368  VMA_ASSERT(0);
6369  return VK_NULL_HANDLE;
6370  }
6371 }
6372 
6373 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6374 {
6375  switch(m_Type)
6376  {
6377  case ALLOCATION_TYPE_BLOCK:
6378  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6379  case ALLOCATION_TYPE_DEDICATED:
6380  return m_DedicatedAllocation.m_MemoryTypeIndex;
6381  default:
6382  VMA_ASSERT(0);
6383  return UINT32_MAX;
6384  }
6385 }
6386 
6387 void* VmaAllocation_T::GetMappedData() const
6388 {
6389  switch(m_Type)
6390  {
6391  case ALLOCATION_TYPE_BLOCK:
6392  if(m_MapCount != 0)
6393  {
6394  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6395  VMA_ASSERT(pBlockData != VMA_NULL);
6396  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6397  }
6398  else
6399  {
6400  return VMA_NULL;
6401  }
6402  break;
6403  case ALLOCATION_TYPE_DEDICATED:
6404  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6405  return m_DedicatedAllocation.m_pMappedData;
6406  default:
6407  VMA_ASSERT(0);
6408  return VMA_NULL;
6409  }
6410 }
6411 
6412 bool VmaAllocation_T::CanBecomeLost() const
6413 {
6414  switch(m_Type)
6415  {
6416  case ALLOCATION_TYPE_BLOCK:
6417  return m_BlockAllocation.m_CanBecomeLost;
6418  case ALLOCATION_TYPE_DEDICATED:
6419  return false;
6420  default:
6421  VMA_ASSERT(0);
6422  return false;
6423  }
6424 }
6425 
6426 VmaPool VmaAllocation_T::GetPool() const
6427 {
6428  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6429  return m_BlockAllocation.m_hPool;
6430 }
6431 
6432 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6433 {
6434  VMA_ASSERT(CanBecomeLost());
6435 
6436  /*
6437  Warning: This is a carefully designed algorithm.
6438  Do not modify unless you really know what you're doing :)
6439  */
6440  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
6441  for(;;)
6442  {
6443  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6444  {
6445  VMA_ASSERT(0);
6446  return false;
6447  }
6448  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6449  {
6450  return false;
6451  }
6452  else // Last use time earlier than current time.
6453  {
6454  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6455  {
6456  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6457  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
6458  return true;
6459  }
6460  }
6461  }
6462 }
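// Note on the loop above: this is a standard atomic compare-exchange retry.
// Assuming CompareExchangeLastUseFrameIndex() follows the usual
// compare_exchange contract (on failure it reloads the current value into
// localLastUseFrameIndex), each iteration re-checks the freshly observed frame
// index until the allocation is proven too recently used, found already LOST,
// or successfully marked with VMA_FRAME_INDEX_LOST.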
6463 
6464 #if VMA_STATS_STRING_ENABLED
6465 
6466 // Correspond to values of enum VmaSuballocationType.
6467 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6468  "FREE",
6469  "UNKNOWN",
6470  "BUFFER",
6471  "IMAGE_UNKNOWN",
6472  "IMAGE_LINEAR",
6473  "IMAGE_OPTIMAL",
6474 };
6475 
6476 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6477 {
6478  json.WriteString("Type");
6479  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6480 
6481  json.WriteString("Size");
6482  json.WriteNumber(m_Size);
6483 
6484  if(m_pUserData != VMA_NULL)
6485  {
6486  json.WriteString("UserData");
6487  if(IsUserDataString())
6488  {
6489  json.WriteString((const char*)m_pUserData);
6490  }
6491  else
6492  {
6493  json.BeginString();
6494  json.ContinueString_Pointer(m_pUserData);
6495  json.EndString();
6496  }
6497  }
6498 
6499  json.WriteString("CreationFrameIndex");
6500  json.WriteNumber(m_CreationFrameIndex);
6501 
6502  json.WriteString("LastUseFrameIndex");
6503  json.WriteNumber(GetLastUseFrameIndex());
6504 
6505  if(m_BufferImageUsage != 0)
6506  {
6507  json.WriteString("Usage");
6508  json.WriteNumber(m_BufferImageUsage);
6509  }
6510 }
6511 
6512 #endif
6513 
6514 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6515 {
6516  VMA_ASSERT(IsUserDataString());
6517  if(m_pUserData != VMA_NULL)
6518  {
6519  char* const oldStr = (char*)m_pUserData;
6520  const size_t oldStrLen = strlen(oldStr);
6521  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6522  m_pUserData = VMA_NULL;
6523  }
6524 }
6525 
6526 void VmaAllocation_T::BlockAllocMap()
6527 {
6528  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6529 
6530  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6531  {
6532  ++m_MapCount;
6533  }
6534  else
6535  {
6536  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6537  }
6538 }
6539 
6540 void VmaAllocation_T::BlockAllocUnmap()
6541 {
6542  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6543 
6544  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6545  {
6546  --m_MapCount;
6547  }
6548  else
6549  {
6550  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6551  }
6552 }
6553 
6554 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6555 {
6556  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6557 
6558  if(m_MapCount != 0)
6559  {
6560  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6561  {
6562  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6563  *ppData = m_DedicatedAllocation.m_pMappedData;
6564  ++m_MapCount;
6565  return VK_SUCCESS;
6566  }
6567  else
6568  {
6569  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6570  return VK_ERROR_MEMORY_MAP_FAILED;
6571  }
6572  }
6573  else
6574  {
6575  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6576  hAllocator->m_hDevice,
6577  m_DedicatedAllocation.m_hMemory,
6578  0, // offset
6579  VK_WHOLE_SIZE,
6580  0, // flags
6581  ppData);
6582  if(result == VK_SUCCESS)
6583  {
6584  m_DedicatedAllocation.m_pMappedData = *ppData;
6585  m_MapCount = 1;
6586  }
6587  return result;
6588  }
6589 }
6590 
6591 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6592 {
6593  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6594 
6595  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6596  {
6597  --m_MapCount;
6598  if(m_MapCount == 0)
6599  {
6600  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6601  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6602  hAllocator->m_hDevice,
6603  m_DedicatedAllocation.m_hMemory);
6604  }
6605  }
6606  else
6607  {
6608  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6609  }
6610 }
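// Illustrative sketch (not part of the library): the map/unmap pair above is
// reference-counted, so nested mapping through the public API stays balanced
// and only the outermost pair touches Vulkan:
//
//   void* pData = VMA_NULL;
//   vmaMapMemory(allocator, alloc, &pData); // m_MapCount 0 -> 1, calls vkMapMemory
//   vmaMapMemory(allocator, alloc, &pData); // m_MapCount 1 -> 2, no Vulkan call
//   vmaUnmapMemory(allocator, alloc);       // m_MapCount 2 -> 1
//   vmaUnmapMemory(allocator, alloc);       // m_MapCount 1 -> 0, calls vkUnmapMemory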
6611 
6612 #if VMA_STATS_STRING_ENABLED
6613 
6614 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6615 {
6616  json.BeginObject();
6617 
6618  json.WriteString("Blocks");
6619  json.WriteNumber(stat.blockCount);
6620 
6621  json.WriteString("Allocations");
6622  json.WriteNumber(stat.allocationCount);
6623 
6624  json.WriteString("UnusedRanges");
6625  json.WriteNumber(stat.unusedRangeCount);
6626 
6627  json.WriteString("UsedBytes");
6628  json.WriteNumber(stat.usedBytes);
6629 
6630  json.WriteString("UnusedBytes");
6631  json.WriteNumber(stat.unusedBytes);
6632 
6633  if(stat.allocationCount > 1)
6634  {
6635  json.WriteString("AllocationSize");
6636  json.BeginObject(true);
6637  json.WriteString("Min");
6638  json.WriteNumber(stat.allocationSizeMin);
6639  json.WriteString("Avg");
6640  json.WriteNumber(stat.allocationSizeAvg);
6641  json.WriteString("Max");
6642  json.WriteNumber(stat.allocationSizeMax);
6643  json.EndObject();
6644  }
6645 
6646  if(stat.unusedRangeCount > 1)
6647  {
6648  json.WriteString("UnusedRangeSize");
6649  json.BeginObject(true);
6650  json.WriteString("Min");
6651  json.WriteNumber(stat.unusedRangeSizeMin);
6652  json.WriteString("Avg");
6653  json.WriteNumber(stat.unusedRangeSizeAvg);
6654  json.WriteString("Max");
6655  json.WriteNumber(stat.unusedRangeSizeMax);
6656  json.EndObject();
6657  }
6658 
6659  json.EndObject();
6660 }
6661 
6662 #endif // #if VMA_STATS_STRING_ENABLED
6663 
6664 struct VmaSuballocationItemSizeLess
6665 {
6666  bool operator()(
6667  const VmaSuballocationList::iterator lhs,
6668  const VmaSuballocationList::iterator rhs) const
6669  {
6670  return lhs->size < rhs->size;
6671  }
6672  bool operator()(
6673  const VmaSuballocationList::iterator lhs,
6674  VkDeviceSize rhsSize) const
6675  {
6676  return lhs->size < rhsSize;
6677  }
6678 };
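// Illustrative sketch (not part of the library): the second operator() is a
// heterogeneous comparator, letting m_FreeSuballocationsBySize be binary-
// searched by a plain VkDeviceSize key, as CreateAllocationRequest() does:
//
//   VmaSuballocationList::iterator* it = VmaBinaryFindFirstNotLess(
//       vec.data(), vec.data() + vec.size(), // 'vec' = a vector sorted by size
//       requiredSize,                        // hypothetical VkDeviceSize key
//       VmaSuballocationItemSizeLess());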
6679 
6680 
6681 ////////////////////////////////////////////////////////////////////////////////
6682 // class VmaBlockMetadata
6683 
6684 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
6685  m_Size(0),
6686  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
6687 {
6688 }
6689 
6690 #if VMA_STATS_STRING_ENABLED
6691 
6692 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6693  VkDeviceSize unusedBytes,
6694  size_t allocationCount,
6695  size_t unusedRangeCount) const
6696 {
6697  json.BeginObject();
6698 
6699  json.WriteString("TotalBytes");
6700  json.WriteNumber(GetSize());
6701 
6702  json.WriteString("UnusedBytes");
6703  json.WriteNumber(unusedBytes);
6704 
6705  json.WriteString("Allocations");
6706  json.WriteNumber((uint64_t)allocationCount);
6707 
6708  json.WriteString("UnusedRanges");
6709  json.WriteNumber((uint64_t)unusedRangeCount);
6710 
6711  json.WriteString("Suballocations");
6712  json.BeginArray();
6713 }
6714 
6715 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6716  VkDeviceSize offset,
6717  VmaAllocation hAllocation) const
6718 {
6719  json.BeginObject(true);
6720 
6721  json.WriteString("Offset");
6722  json.WriteNumber(offset);
6723 
6724  hAllocation->PrintParameters(json);
6725 
6726  json.EndObject();
6727 }
6728 
6729 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6730  VkDeviceSize offset,
6731  VkDeviceSize size) const
6732 {
6733  json.BeginObject(true);
6734 
6735  json.WriteString("Offset");
6736  json.WriteNumber(offset);
6737 
6738  json.WriteString("Type");
6739  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6740 
6741  json.WriteString("Size");
6742  json.WriteNumber(size);
6743 
6744  json.EndObject();
6745 }
6746 
6747 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6748 {
6749  json.EndArray();
6750  json.EndObject();
6751 }
6752 
6753 #endif // #if VMA_STATS_STRING_ENABLED
6754 
6755 ////////////////////////////////////////////////////////////////////////////////
6756 // class VmaBlockMetadata_Generic
6757 
6758 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6759  VmaBlockMetadata(hAllocator),
6760  m_FreeCount(0),
6761  m_SumFreeSize(0),
6762  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6763  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6764 {
6765 }
6766 
6767 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6768 {
6769 }
6770 
6771 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6772 {
6773  VmaBlockMetadata::Init(size);
6774 
6775  m_FreeCount = 1;
6776  m_SumFreeSize = size;
6777 
6778  VmaSuballocation suballoc = {};
6779  suballoc.offset = 0;
6780  suballoc.size = size;
6781  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6782  suballoc.hAllocation = VK_NULL_HANDLE;
6783 
6784  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6785  m_Suballocations.push_back(suballoc);
6786  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6787  --suballocItem;
6788  m_FreeSuballocationsBySize.push_back(suballocItem);
6789 }
6790 
6791 bool VmaBlockMetadata_Generic::Validate() const
6792 {
6793  VMA_VALIDATE(!m_Suballocations.empty());
6794 
6795  // Expected offset of new suballocation as calculated from previous ones.
6796  VkDeviceSize calculatedOffset = 0;
6797  // Expected number of free suballocations as calculated from traversing their list.
6798  uint32_t calculatedFreeCount = 0;
6799  // Expected sum size of free suballocations as calculated from traversing their list.
6800  VkDeviceSize calculatedSumFreeSize = 0;
6801  // Expected number of free suballocations that should be registered in
6802  // m_FreeSuballocationsBySize calculated from traversing their list.
6803  size_t freeSuballocationsToRegister = 0;
6804  // True if the previously visited suballocation was free.
6805  bool prevFree = false;
6806 
6807  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6808  suballocItem != m_Suballocations.cend();
6809  ++suballocItem)
6810  {
6811  const VmaSuballocation& subAlloc = *suballocItem;
6812 
6813  // Actual offset of this suballocation doesn't match expected one.
6814  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6815 
6816  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6817  // Two adjacent free suballocations are invalid. They should be merged.
6818  VMA_VALIDATE(!prevFree || !currFree);
6819 
6820  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
6821 
6822  if(currFree)
6823  {
6824  calculatedSumFreeSize += subAlloc.size;
6825  ++calculatedFreeCount;
6826  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6827  {
6828  ++freeSuballocationsToRegister;
6829  }
6830 
6831  // Margin required between allocations - every free range must be at least that large.
6832  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
6833  }
6834  else
6835  {
6836  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
6837  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
6838 
6839  // Margin required between allocations - previous allocation must be free.
6840  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
6841  }
6842 
6843  calculatedOffset += subAlloc.size;
6844  prevFree = currFree;
6845  }
6846 
6847  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6848  // match expected one.
6849  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6850 
6851  VkDeviceSize lastSize = 0;
6852  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6853  {
6854  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6855 
6856  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6857  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6858  // They must be sorted by size ascending.
6859  VMA_VALIDATE(suballocItem->size >= lastSize);
6860 
6861  lastSize = suballocItem->size;
6862  }
6863 
6864  // Check if totals match calculated values.
6865  VMA_VALIDATE(ValidateFreeSuballocationList());
6866  VMA_VALIDATE(calculatedOffset == GetSize());
6867  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6868  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6869 
6870  return true;
6871 }
6872 
6873 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6874 {
6875  if(!m_FreeSuballocationsBySize.empty())
6876  {
6877  return m_FreeSuballocationsBySize.back()->size;
6878  }
6879  else
6880  {
6881  return 0;
6882  }
6883 }
6884 
6885 bool VmaBlockMetadata_Generic::IsEmpty() const
6886 {
6887  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6888 }
6889 
6890 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6891 {
6892  outInfo.blockCount = 1;
6893 
6894  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6895  outInfo.allocationCount = rangeCount - m_FreeCount;
6896  outInfo.unusedRangeCount = m_FreeCount;
6897 
6898  outInfo.unusedBytes = m_SumFreeSize;
6899  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6900 
6901  outInfo.allocationSizeMin = UINT64_MAX;
6902  outInfo.allocationSizeMax = 0;
6903  outInfo.unusedRangeSizeMin = UINT64_MAX;
6904  outInfo.unusedRangeSizeMax = 0;
6905 
6906  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6907  suballocItem != m_Suballocations.cend();
6908  ++suballocItem)
6909  {
6910  const VmaSuballocation& suballoc = *suballocItem;
6911  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6912  {
6913  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6914  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6915  }
6916  else
6917  {
6918  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6919  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6920  }
6921  }
6922 }
6923 
6924 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6925 {
6926  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6927 
6928  inoutStats.size += GetSize();
6929  inoutStats.unusedSize += m_SumFreeSize;
6930  inoutStats.allocationCount += rangeCount - m_FreeCount;
6931  inoutStats.unusedRangeCount += m_FreeCount;
6932  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6933 }
6934 
6935 #if VMA_STATS_STRING_ENABLED
6936 
6937 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6938 {
6939  PrintDetailedMap_Begin(json,
6940  m_SumFreeSize, // unusedBytes
6941  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6942  m_FreeCount); // unusedRangeCount
6943 
6944  size_t i = 0;
6945  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6946  suballocItem != m_Suballocations.cend();
6947  ++suballocItem, ++i)
6948  {
6949  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6950  {
6951  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6952  }
6953  else
6954  {
6955  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6956  }
6957  }
6958 
6959  PrintDetailedMap_End(json);
6960 }
6961 
6962 #endif // #if VMA_STATS_STRING_ENABLED
6963 
6964 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6965  uint32_t currentFrameIndex,
6966  uint32_t frameInUseCount,
6967  VkDeviceSize bufferImageGranularity,
6968  VkDeviceSize allocSize,
6969  VkDeviceSize allocAlignment,
6970  bool upperAddress,
6971  VmaSuballocationType allocType,
6972  bool canMakeOtherLost,
6973  uint32_t strategy,
6974  VmaAllocationRequest* pAllocationRequest)
6975 {
6976  VMA_ASSERT(allocSize > 0);
6977  VMA_ASSERT(!upperAddress);
6978  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6979  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6980  VMA_HEAVY_ASSERT(Validate());
6981 
6982  // There is not enough total free space in this block to fulfill the request: Early return.
6983  if(canMakeOtherLost == false &&
6984  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6985  {
6986  return false;
6987  }
6988 
6989  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
6990  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6991  if(freeSuballocCount > 0)
6992  {
6993  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
6994  {
6995  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
6996  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6997  m_FreeSuballocationsBySize.data(),
6998  m_FreeSuballocationsBySize.data() + freeSuballocCount,
6999  allocSize + 2 * VMA_DEBUG_MARGIN,
7000  VmaSuballocationItemSizeLess());
7001  size_t index = it - m_FreeSuballocationsBySize.data();
7002  for(; index < freeSuballocCount; ++index)
7003  {
7004  if(CheckAllocation(
7005  currentFrameIndex,
7006  frameInUseCount,
7007  bufferImageGranularity,
7008  allocSize,
7009  allocAlignment,
7010  allocType,
7011  m_FreeSuballocationsBySize[index],
7012  false, // canMakeOtherLost
7013  &pAllocationRequest->offset,
7014  &pAllocationRequest->itemsToMakeLostCount,
7015  &pAllocationRequest->sumFreeSize,
7016  &pAllocationRequest->sumItemSize))
7017  {
7018  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7019  return true;
7020  }
7021  }
7022  }
7023  else // WORST_FIT, FIRST_FIT
7024  {
7025  // Search starting from biggest suballocations.
7026  for(size_t index = freeSuballocCount; index--; )
7027  {
7028  if(CheckAllocation(
7029  currentFrameIndex,
7030  frameInUseCount,
7031  bufferImageGranularity,
7032  allocSize,
7033  allocAlignment,
7034  allocType,
7035  m_FreeSuballocationsBySize[index],
7036  false, // canMakeOtherLost
7037  &pAllocationRequest->offset,
7038  &pAllocationRequest->itemsToMakeLostCount,
7039  &pAllocationRequest->sumFreeSize,
7040  &pAllocationRequest->sumItemSize))
7041  {
7042  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7043  return true;
7044  }
7045  }
7046  }
7047  }
7048 
7049  if(canMakeOtherLost)
7050  {
7051  // Brute-force algorithm. TODO: Come up with something better.
7052 
7053  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7054  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7055 
7056  VmaAllocationRequest tmpAllocRequest = {};
7057  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7058  suballocIt != m_Suballocations.end();
7059  ++suballocIt)
7060  {
7061  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7062  suballocIt->hAllocation->CanBecomeLost())
7063  {
7064  if(CheckAllocation(
7065  currentFrameIndex,
7066  frameInUseCount,
7067  bufferImageGranularity,
7068  allocSize,
7069  allocAlignment,
7070  allocType,
7071  suballocIt,
7072  canMakeOtherLost,
7073  &tmpAllocRequest.offset,
7074  &tmpAllocRequest.itemsToMakeLostCount,
7075  &tmpAllocRequest.sumFreeSize,
7076  &tmpAllocRequest.sumItemSize))
7077  {
7078  tmpAllocRequest.item = suballocIt;
7079 
7080  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7081  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7082  {
7083  *pAllocationRequest = tmpAllocRequest;
7084  }
7085  }
7086  }
7087  }
7088 
7089  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7090  {
7091  return true;
7092  }
7093  }
7094 
7095  return false;
7096 }
7097 
7098 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7099  uint32_t currentFrameIndex,
7100  uint32_t frameInUseCount,
7101  VmaAllocationRequest* pAllocationRequest)
7102 {
7103  while(pAllocationRequest->itemsToMakeLostCount > 0)
7104  {
7105  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7106  {
7107  ++pAllocationRequest->item;
7108  }
7109  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7110  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7111  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7112  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7113  {
7114  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7115  --pAllocationRequest->itemsToMakeLostCount;
7116  }
7117  else
7118  {
7119  return false;
7120  }
7121  }
7122 
7123  VMA_HEAVY_ASSERT(Validate());
7124  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7125  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7126 
7127  return true;
7128 }
7129 
7130 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7131 {
7132  uint32_t lostAllocationCount = 0;
7133  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7134  it != m_Suballocations.end();
7135  ++it)
7136  {
7137  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7138  it->hAllocation->CanBecomeLost() &&
7139  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7140  {
7141  it = FreeSuballocation(it);
7142  ++lostAllocationCount;
7143  }
7144  }
7145  return lostAllocationCount;
7146 }
7147 
7148 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7149 {
7150  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7151  it != m_Suballocations.end();
7152  ++it)
7153  {
7154  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7155  {
7156  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7157  {
7158  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7159  return VK_ERROR_VALIDATION_FAILED_EXT;
7160  }
7161  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7162  {
7163  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7164  return VK_ERROR_VALIDATION_FAILED_EXT;
7165  }
7166  }
7167  }
7168 
7169  return VK_SUCCESS;
7170 }
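// Note: the check above assumes VmaValidateMagicValue() verifies the canary
// pattern written into the VMA_DEBUG_MARGIN region on both sides of every
// allocation (see VmaWriteMagicValue elsewhere in this file), so it is only
// meaningful when VMA_DEBUG_MARGIN > 0 and corruption detection is enabled.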
7171 
7172 void VmaBlockMetadata_Generic::Alloc(
7173  const VmaAllocationRequest& request,
7174  VmaSuballocationType type,
7175  VkDeviceSize allocSize,
7176  bool upperAddress,
7177  VmaAllocation hAllocation)
7178 {
7179  VMA_ASSERT(!upperAddress);
7180  VMA_ASSERT(request.item != m_Suballocations.end());
7181  VmaSuballocation& suballoc = *request.item;
7182  // Given suballocation is a free block.
7183  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7184  // Given offset is inside this suballocation.
7185  VMA_ASSERT(request.offset >= suballoc.offset);
7186  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7187  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7188  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7189 
7190  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7191  // it to become used.
7192  UnregisterFreeSuballocation(request.item);
7193 
7194  suballoc.offset = request.offset;
7195  suballoc.size = allocSize;
7196  suballoc.type = type;
7197  suballoc.hAllocation = hAllocation;
7198 
7199  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7200  if(paddingEnd)
7201  {
7202  VmaSuballocation paddingSuballoc = {};
7203  paddingSuballoc.offset = request.offset + allocSize;
7204  paddingSuballoc.size = paddingEnd;
7205  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7206  VmaSuballocationList::iterator next = request.item;
7207  ++next;
7208  const VmaSuballocationList::iterator paddingEndItem =
7209  m_Suballocations.insert(next, paddingSuballoc);
7210  RegisterFreeSuballocation(paddingEndItem);
7211  }
7212 
7213  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7214  if(paddingBegin)
7215  {
7216  VmaSuballocation paddingSuballoc = {};
7217  paddingSuballoc.offset = request.offset - paddingBegin;
7218  paddingSuballoc.size = paddingBegin;
7219  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7220  const VmaSuballocationList::iterator paddingBeginItem =
7221  m_Suballocations.insert(request.item, paddingSuballoc);
7222  RegisterFreeSuballocation(paddingBeginItem);
7223  }
7224 
7225  // Update totals.
7226  m_FreeCount = m_FreeCount - 1;
7227  if(paddingBegin > 0)
7228  {
7229  ++m_FreeCount;
7230  }
7231  if(paddingEnd > 0)
7232  {
7233  ++m_FreeCount;
7234  }
7235  m_SumFreeSize -= allocSize;
7236 }
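// Worked example (illustrative): given a free suballocation [offset=0,
// size=256] and a request resolved to offset=64 with allocSize=128,
// paddingBegin=64 and paddingEnd=64, so the list becomes:
//   [0..64)    free  (new suballocation, registered by size)
//   [64..192)  used  (the request)
//   [192..256) free  (new suballocation, registered by size)
// m_FreeCount: -1 for the consumed item, +1 for each non-empty padding.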
7237 
7238 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7239 {
7240  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7241  suballocItem != m_Suballocations.end();
7242  ++suballocItem)
7243  {
7244  VmaSuballocation& suballoc = *suballocItem;
7245  if(suballoc.hAllocation == allocation)
7246  {
7247  FreeSuballocation(suballocItem);
7248  VMA_HEAVY_ASSERT(Validate());
7249  return;
7250  }
7251  }
7252  VMA_ASSERT(0 && "Not found!");
7253 }
7254 
7255 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7256 {
7257  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7258  suballocItem != m_Suballocations.end();
7259  ++suballocItem)
7260  {
7261  VmaSuballocation& suballoc = *suballocItem;
7262  if(suballoc.offset == offset)
7263  {
7264  FreeSuballocation(suballocItem);
7265  return;
7266  }
7267  }
7268  VMA_ASSERT(0 && "Not found!");
7269 }
7270 
7271 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
7272 {
7273  typedef VmaSuballocationList::iterator iter_type;
7274  for(iter_type suballocItem = m_Suballocations.begin();
7275  suballocItem != m_Suballocations.end();
7276  ++suballocItem)
7277  {
7278  VmaSuballocation& suballoc = *suballocItem;
7279  if(suballoc.hAllocation == alloc)
7280  {
7281  iter_type nextItem = suballocItem;
7282  ++nextItem;
7283 
7284  // Should have been ensured at a higher level.
7285  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
7286 
7287  // Shrinking.
7288  if(newSize < alloc->GetSize())
7289  {
7290  const VkDeviceSize sizeDiff = suballoc.size - newSize;
7291 
7292  // There is next item.
7293  if(nextItem != m_Suballocations.end())
7294  {
7295  // Next item is free.
7296  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7297  {
7298  // Grow this next item backward.
7299  UnregisterFreeSuballocation(nextItem);
7300  nextItem->offset -= sizeDiff;
7301  nextItem->size += sizeDiff;
7302  RegisterFreeSuballocation(nextItem);
7303  }
7304  // Next item is not free.
7305  else
7306  {
7307  // Create free item after current one.
7308  VmaSuballocation newFreeSuballoc;
7309  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
7310  newFreeSuballoc.offset = suballoc.offset + newSize;
7311  newFreeSuballoc.size = sizeDiff;
7312  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7313  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
7314  RegisterFreeSuballocation(newFreeSuballocIt);
7315 
7316  ++m_FreeCount;
7317  }
7318  }
7319  // This is the last item.
7320  else
7321  {
7322  // Create free item at the end.
7323  VmaSuballocation newFreeSuballoc;
7324  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
7325  newFreeSuballoc.offset = suballoc.offset + newSize;
7326  newFreeSuballoc.size = sizeDiff;
7327  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7328  m_Suballocations.push_back(newFreeSuballoc);
7329 
7330  iter_type newFreeSuballocIt = m_Suballocations.end();
7331  RegisterFreeSuballocation(--newFreeSuballocIt);
7332 
7333  ++m_FreeCount;
7334  }
7335 
7336  suballoc.size = newSize;
7337  m_SumFreeSize += sizeDiff;
7338  }
7339  // Growing.
7340  else
7341  {
7342  const VkDeviceSize sizeDiff = newSize - suballoc.size;
7343 
7344  // There is next item.
7345  if(nextItem != m_Suballocations.end())
7346  {
7347  // Next item is free.
7348  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7349  {
7350  // There is not enough free space, including margin.
7351  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
7352  {
7353  return false;
7354  }
7355 
7356  // There is more free space than required.
7357  if(nextItem->size > sizeDiff)
7358  {
7359  // Move and shrink this next item.
7360  UnregisterFreeSuballocation(nextItem);
7361  nextItem->offset += sizeDiff;
7362  nextItem->size -= sizeDiff;
7363  RegisterFreeSuballocation(nextItem);
7364  }
7365  // There is exactly the amount of free space required.
7366  else
7367  {
7368  // Remove this next free item.
7369  UnregisterFreeSuballocation(nextItem);
7370  m_Suballocations.erase(nextItem);
7371  --m_FreeCount;
7372  }
7373  }
7374  // Next item is not free - there is no space to grow.
7375  else
7376  {
7377  return false;
7378  }
7379  }
7380  // This is the last item - there is no space to grow.
7381  else
7382  {
7383  return false;
7384  }
7385 
7386  suballoc.size = newSize;
7387  m_SumFreeSize -= sizeDiff;
7388  }
7389 
7390  // We cannot call Validate() here because alloc object is updated to new size outside of this call.
7391  return true;
7392  }
7393  }
7394  VMA_ASSERT(0 && "Not found!");
7395  return false;
7396 }
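// Worked example (illustrative): with allocation [offset=0, size=100] followed
// by a free item [offset=100, size=50] and VMA_DEBUG_MARGIN=0:
// - Shrinking to newSize=60 grows the free item backward to [offset=60,
//   size=90]; m_SumFreeSize increases by 40.
// - Growing to newSize=120 moves and shrinks the free item to [offset=120,
//   size=30]; m_SumFreeSize decreases by 20.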
7397 
7398 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7399 {
7400  VkDeviceSize lastSize = 0;
7401  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7402  {
7403  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7404 
7405  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7406  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7407  VMA_VALIDATE(it->size >= lastSize);
7408  lastSize = it->size;
7409  }
7410  return true;
7411 }
7412 
7413 bool VmaBlockMetadata_Generic::CheckAllocation(
7414  uint32_t currentFrameIndex,
7415  uint32_t frameInUseCount,
7416  VkDeviceSize bufferImageGranularity,
7417  VkDeviceSize allocSize,
7418  VkDeviceSize allocAlignment,
7419  VmaSuballocationType allocType,
7420  VmaSuballocationList::const_iterator suballocItem,
7421  bool canMakeOtherLost,
7422  VkDeviceSize* pOffset,
7423  size_t* itemsToMakeLostCount,
7424  VkDeviceSize* pSumFreeSize,
7425  VkDeviceSize* pSumItemSize) const
7426 {
7427  VMA_ASSERT(allocSize > 0);
7428  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7429  VMA_ASSERT(suballocItem != m_Suballocations.cend());
7430  VMA_ASSERT(pOffset != VMA_NULL);
7431 
7432  *itemsToMakeLostCount = 0;
7433  *pSumFreeSize = 0;
7434  *pSumItemSize = 0;
7435 
7436  if(canMakeOtherLost)
7437  {
7438  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7439  {
7440  *pSumFreeSize = suballocItem->size;
7441  }
7442  else
7443  {
7444  if(suballocItem->hAllocation->CanBecomeLost() &&
7445  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7446  {
7447  ++*itemsToMakeLostCount;
7448  *pSumItemSize = suballocItem->size;
7449  }
7450  else
7451  {
7452  return false;
7453  }
7454  }
7455 
7456  // Remaining size is too small for this request: Early return.
7457  if(GetSize() - suballocItem->offset < allocSize)
7458  {
7459  return false;
7460  }
7461 
7462  // Start from offset equal to beginning of this suballocation.
7463  *pOffset = suballocItem->offset;
7464 
7465  // Apply VMA_DEBUG_MARGIN at the beginning.
7466  if(VMA_DEBUG_MARGIN > 0)
7467  {
7468  *pOffset += VMA_DEBUG_MARGIN;
7469  }
7470 
7471  // Apply alignment.
7472  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7473 
7474  // Check previous suballocations for BufferImageGranularity conflicts.
7475  // Make bigger alignment if necessary.
7476  if(bufferImageGranularity > 1)
7477  {
7478  bool bufferImageGranularityConflict = false;
7479  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7480  while(prevSuballocItem != m_Suballocations.cbegin())
7481  {
7482  --prevSuballocItem;
7483  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7484  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7485  {
7486  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7487  {
7488  bufferImageGranularityConflict = true;
7489  break;
7490  }
7491  }
7492  else
7493  // Already on previous page.
7494  break;
7495  }
7496  if(bufferImageGranularityConflict)
7497  {
7498  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7499  }
7500  }
7501 
7502  // Now that we have final *pOffset, check if we are past suballocItem.
7503  // If yes, return false - this function should be called with another suballocItem as the starting point.
7504  if(*pOffset >= suballocItem->offset + suballocItem->size)
7505  {
7506  return false;
7507  }
7508 
7509  // Calculate padding at the beginning based on current offset.
7510  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
7511 
7512  // Calculate required margin at the end.
7513  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7514 
7515  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
7516  // Another early return check.
7517  if(suballocItem->offset + totalSize > GetSize())
7518  {
7519  return false;
7520  }
7521 
7522  // Advance lastSuballocItem until desired size is reached.
7523  // Update itemsToMakeLostCount.
7524  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
7525  if(totalSize > suballocItem->size)
7526  {
7527  VkDeviceSize remainingSize = totalSize - suballocItem->size;
7528  while(remainingSize > 0)
7529  {
7530  ++lastSuballocItem;
7531  if(lastSuballocItem == m_Suballocations.cend())
7532  {
7533  return false;
7534  }
7535  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7536  {
7537  *pSumFreeSize += lastSuballocItem->size;
7538  }
7539  else
7540  {
7541  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
7542  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
7543  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7544  {
7545  ++*itemsToMakeLostCount;
7546  *pSumItemSize += lastSuballocItem->size;
7547  }
7548  else
7549  {
7550  return false;
7551  }
7552  }
7553  remainingSize = (lastSuballocItem->size < remainingSize) ?
7554  remainingSize - lastSuballocItem->size : 0;
7555  }
7556  }
7557 
7558  // Check next suballocations for BufferImageGranularity conflicts.
7559  // If conflict exists, we must mark more allocations lost or fail.
7560  if(bufferImageGranularity > 1)
7561  {
7562  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
7563  ++nextSuballocItem;
7564  while(nextSuballocItem != m_Suballocations.cend())
7565  {
7566  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7567  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7568  {
7569  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7570  {
7571  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
7572  if(nextSuballoc.hAllocation->CanBecomeLost() &&
7573  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7574  {
7575  ++*itemsToMakeLostCount;
7576  }
7577  else
7578  {
7579  return false;
7580  }
7581  }
7582  }
7583  else
7584  {
7585  // Already on next page.
7586  break;
7587  }
7588  ++nextSuballocItem;
7589  }
7590  }
7591  }
7592  else
7593  {
7594  const VmaSuballocation& suballoc = *suballocItem;
7595  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7596 
7597  *pSumFreeSize = suballoc.size;
7598 
7599  // Size of this suballocation is too small for this request: Early return.
7600  if(suballoc.size < allocSize)
7601  {
7602  return false;
7603  }
7604 
7605  // Start from offset equal to beginning of this suballocation.
7606  *pOffset = suballoc.offset;
7607 
7608  // Apply VMA_DEBUG_MARGIN at the beginning.
7609  if(VMA_DEBUG_MARGIN > 0)
7610  {
7611  *pOffset += VMA_DEBUG_MARGIN;
7612  }
7613 
7614  // Apply alignment.
7615  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7616 
7617  // Check previous suballocations for BufferImageGranularity conflicts.
7618  // Make bigger alignment if necessary.
7619  if(bufferImageGranularity > 1)
7620  {
7621  bool bufferImageGranularityConflict = false;
7622  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7623  while(prevSuballocItem != m_Suballocations.cbegin())
7624  {
7625  --prevSuballocItem;
7626  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7627  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7628  {
7629  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7630  {
7631  bufferImageGranularityConflict = true;
7632  break;
7633  }
7634  }
7635  else
7636  // Already on previous page.
7637  break;
7638  }
7639  if(bufferImageGranularityConflict)
7640  {
7641  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7642  }
7643  }
7644 
7645  // Calculate padding at the beginning based on current offset.
7646  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
7647 
7648  // Calculate required margin at the end.
7649  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7650 
7651  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
7652  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
7653  {
7654  return false;
7655  }
7656 
7657  // Check next suballocations for BufferImageGranularity conflicts.
7658  // If conflict exists, allocation cannot be made here.
7659  if(bufferImageGranularity > 1)
7660  {
7661  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7662  ++nextSuballocItem;
7663  while(nextSuballocItem != m_Suballocations.cend())
7664  {
7665  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7666  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7667  {
7668  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7669  {
7670  return false;
7671  }
7672  }
7673  else
7674  {
7675  // Already on next page.
7676  break;
7677  }
7678  ++nextSuballocItem;
7679  }
7680  }
7681  }
7682 
7683  // All tests passed: Success. pOffset is already filled.
7684  return true;
7685 }
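// Worked example (illustrative) for the simple (!canMakeOtherLost) path above,
// assuming no bufferImageGranularity conflicts: a free suballocation
// [offset=100, size=300] with VMA_DEBUG_MARGIN=16 and allocAlignment=64 gives
// *pOffset = VmaAlignUp(100 + 16, 64) = 128, hence paddingBegin = 28. For
// allocSize=200: 28 + 200 + 16 = 244 <= 300, so the request fits and the
// function returns true.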
7686 
7687 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7688 {
7689  VMA_ASSERT(item != m_Suballocations.end());
7690  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7691 
7692  VmaSuballocationList::iterator nextItem = item;
7693  ++nextItem;
7694  VMA_ASSERT(nextItem != m_Suballocations.end());
7695  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7696 
7697  item->size += nextItem->size;
7698  --m_FreeCount;
7699  m_Suballocations.erase(nextItem);
7700 }
7701 
7702 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7703 {
7704  // Change this suballocation to be marked as free.
7705  VmaSuballocation& suballoc = *suballocItem;
7706  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7707  suballoc.hAllocation = VK_NULL_HANDLE;
7708 
7709  // Update totals.
7710  ++m_FreeCount;
7711  m_SumFreeSize += suballoc.size;
7712 
7713  // Merge with previous and/or next suballocation if it's also free.
7714  bool mergeWithNext = false;
7715  bool mergeWithPrev = false;
7716 
7717  VmaSuballocationList::iterator nextItem = suballocItem;
7718  ++nextItem;
7719  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7720  {
7721  mergeWithNext = true;
7722  }
7723 
7724  VmaSuballocationList::iterator prevItem = suballocItem;
7725  if(suballocItem != m_Suballocations.begin())
7726  {
7727  --prevItem;
7728  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7729  {
7730  mergeWithPrev = true;
7731  }
7732  }
7733 
7734  if(mergeWithNext)
7735  {
7736  UnregisterFreeSuballocation(nextItem);
7737  MergeFreeWithNext(suballocItem);
7738  }
7739 
7740  if(mergeWithPrev)
7741  {
7742  UnregisterFreeSuballocation(prevItem);
7743  MergeFreeWithNext(prevItem);
7744  RegisterFreeSuballocation(prevItem);
7745  return prevItem;
7746  }
7747  else
7748  {
7749  RegisterFreeSuballocation(suballocItem);
7750  return suballocItem;
7751  }
7752 }
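// Note: when both neighbors are free, suballocItem first absorbs its next
// neighbor, then the previous free item absorbs the already-merged
// suballocItem, so a single registered entry covers the whole free range.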
7753 
7754 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7755 {
7756  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7757  VMA_ASSERT(item->size > 0);
7758 
7759  // You may want to enable this validation at the beginning or at the end of
7760  // this function, depending on what you want to check.
7761  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7762 
7763  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7764  {
7765  if(m_FreeSuballocationsBySize.empty())
7766  {
7767  m_FreeSuballocationsBySize.push_back(item);
7768  }
7769  else
7770  {
7771  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7772  }
7773  }
7774 
7775  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7776 }
7777 
7778 
7779 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7780 {
7781  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7782  VMA_ASSERT(item->size > 0);
7783 
7784  // You may want to enable this validation at the beginning or at the end of
7785  // this function, depending on what you want to check.
7786  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7787 
7788  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7789  {
7790  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7791  m_FreeSuballocationsBySize.data(),
7792  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7793  item,
7794  VmaSuballocationItemSizeLess());
7795  for(size_t index = it - m_FreeSuballocationsBySize.data();
7796  index < m_FreeSuballocationsBySize.size();
7797  ++index)
7798  {
7799  if(m_FreeSuballocationsBySize[index] == item)
7800  {
7801  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7802  return;
7803  }
7804  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7805  }
7806  VMA_ASSERT(0 && "Not found.");
7807  }
7808 
7809  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7810 }
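// Note: several free suballocations may share the same size, so the binary
// search above only locates the first entry of that size; the linear scan then
// walks the run of equal-sized entries to find the exact iterator, asserting
// if the run ends before 'item' is found.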
7811 
7812 ////////////////////////////////////////////////////////////////////////////////
7813 // class VmaBlockMetadata_Linear
7814 
7815 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
7816  VmaBlockMetadata(hAllocator),
7817  m_SumFreeSize(0),
7818  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7819  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7820  m_1stVectorIndex(0),
7821  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7822  m_1stNullItemsBeginCount(0),
7823  m_1stNullItemsMiddleCount(0),
7824  m_2ndNullItemsCount(0)
7825 {
7826 }
7827 
7828 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
7829 {
7830 }
7831 
7832 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7833 {
7834  VmaBlockMetadata::Init(size);
7835  m_SumFreeSize = size;
7836 }
7837 
7838 bool VmaBlockMetadata_Linear::Validate() const
7839 {
7840  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7841  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7842 
7843  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7844  VMA_VALIDATE(!suballocations1st.empty() ||
7845  suballocations2nd.empty() ||
7846  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7847 
7848  if(!suballocations1st.empty())
7849  {
7850  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
7851  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
7852  // A null item at the end should have been removed by pop_back().
7853  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
7854  }
7855  if(!suballocations2nd.empty())
7856  {
7857  // A null item at the end should have been removed by pop_back().
7858  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
7859  }
7860 
7861  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7862  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7863 
7864  VkDeviceSize sumUsedSize = 0;
7865  const size_t suballoc1stCount = suballocations1st.size();
7866  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7867 
7868  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7869  {
7870  const size_t suballoc2ndCount = suballocations2nd.size();
7871  size_t nullItem2ndCount = 0;
7872  for(size_t i = 0; i < suballoc2ndCount; ++i)
7873  {
7874  const VmaSuballocation& suballoc = suballocations2nd[i];
7875  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7876 
7877  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7878  VMA_VALIDATE(suballoc.offset >= offset);
7879 
7880  if(!currFree)
7881  {
7882  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7883  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7884  sumUsedSize += suballoc.size;
7885  }
7886  else
7887  {
7888  ++nullItem2ndCount;
7889  }
7890 
7891  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7892  }
7893 
7894  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7895  }
7896 
7897  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7898  {
7899  const VmaSuballocation& suballoc = suballocations1st[i];
7900  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7901  suballoc.hAllocation == VK_NULL_HANDLE);
7902  }
7903 
7904  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7905 
7906  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7907  {
7908  const VmaSuballocation& suballoc = suballocations1st[i];
7909  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7910 
7911  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7912  VMA_VALIDATE(suballoc.offset >= offset);
7913  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7914 
7915  if(!currFree)
7916  {
7917  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7918  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7919  sumUsedSize += suballoc.size;
7920  }
7921  else
7922  {
7923  ++nullItem1stCount;
7924  }
7925 
7926  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7927  }
7928  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7929 
7930  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7931  {
7932  const size_t suballoc2ndCount = suballocations2nd.size();
7933  size_t nullItem2ndCount = 0;
7934  for(size_t i = suballoc2ndCount; i--; )
7935  {
7936  const VmaSuballocation& suballoc = suballocations2nd[i];
7937  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7938 
7939  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7940  VMA_VALIDATE(suballoc.offset >= offset);
7941 
7942  if(!currFree)
7943  {
7944  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7945  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7946  sumUsedSize += suballoc.size;
7947  }
7948  else
7949  {
7950  ++nullItem2ndCount;
7951  }
7952 
7953  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7954  }
7955 
7956  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7957  }
7958 
7959  VMA_VALIDATE(offset <= GetSize());
7960  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7961 
7962  return true;
7963 }
7964 
7965 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7966 {
7967  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7968  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7969 }
7970 
7971 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7972 {
7973  const VkDeviceSize size = GetSize();
7974 
7975  /*
7976  We don't consider gaps inside allocation vectors with freed allocations because
7977  they are not suitable for reuse in a linear allocator. We consider only space that
7978  is available for new allocations.
7979  */
7980  if(IsEmpty())
7981  {
7982  return size;
7983  }
7984 
7985  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7986 
7987  switch(m_2ndVectorMode)
7988  {
7989  case SECOND_VECTOR_EMPTY:
7990  /*
7991  Available space is after end of 1st, as well as before beginning of 1st (which
7992  would make it a ring buffer).
7993  */
7994  {
7995  const size_t suballocations1stCount = suballocations1st.size();
7996  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
7997  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
7998  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
7999  return VMA_MAX(
8000  firstSuballoc.offset,
8001  size - (lastSuballoc.offset + lastSuballoc.size));
8002  }
8003  break;
8004 
8005  case SECOND_VECTOR_RING_BUFFER:
8006  /*
8007  Available space is only between end of 2nd and beginning of 1st.
8008  */
8009  {
8010  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8011  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8012  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8013  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8014  }
8015  break;
8016 
8017  case SECOND_VECTOR_DOUBLE_STACK:
8018  /*
8019  Available space is only between end of 1st and top of 2nd.
8020  */
8021  {
8022  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8023  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8024  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8025  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8026  }
8027  break;
8028 
8029  default:
8030  VMA_ASSERT(0);
8031  return 0;
8032  }
8033 }
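// [Editor's illustrative sketch - not part of vk_mem_alloc.h] Layout of the three
// cases handled above ('1' = suballocations1st, '2' = suballocations2nd,
// '.' = the space this function reports as reusable):
//
//   SECOND_VECTOR_EMPTY:        |....111111....|   free before 1st or after 1st
//   SECOND_VECTOR_RING_BUFFER:  |2222....111111|   free between end of 2nd and start of 1st
//   SECOND_VECTOR_DOUBLE_STACK: |111111....2222|   free between end of 1st and top of 2nd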
8034 
8035 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8036 {
8037  const VkDeviceSize size = GetSize();
8038  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8039  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8040  const size_t suballoc1stCount = suballocations1st.size();
8041  const size_t suballoc2ndCount = suballocations2nd.size();
8042 
8043  outInfo.blockCount = 1;
8044  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8045  outInfo.unusedRangeCount = 0;
8046  outInfo.usedBytes = 0;
8047  outInfo.allocationSizeMin = UINT64_MAX;
8048  outInfo.allocationSizeMax = 0;
8049  outInfo.unusedRangeSizeMin = UINT64_MAX;
8050  outInfo.unusedRangeSizeMax = 0;
8051 
8052  VkDeviceSize lastOffset = 0;
8053 
8054  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8055  {
8056  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8057  size_t nextAlloc2ndIndex = 0;
8058  while(lastOffset < freeSpace2ndTo1stEnd)
8059  {
8060  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8061  while(nextAlloc2ndIndex < suballoc2ndCount &&
8062  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8063  {
8064  ++nextAlloc2ndIndex;
8065  }
8066 
8067  // Found non-null allocation.
8068  if(nextAlloc2ndIndex < suballoc2ndCount)
8069  {
8070  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8071 
8072  // 1. Process free space before this allocation.
8073  if(lastOffset < suballoc.offset)
8074  {
8075  // There is free space from lastOffset to suballoc.offset.
8076  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8077  ++outInfo.unusedRangeCount;
8078  outInfo.unusedBytes += unusedRangeSize;
8079  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8080  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8081  }
8082 
8083  // 2. Process this allocation.
8084  // There is allocation with suballoc.offset, suballoc.size.
8085  outInfo.usedBytes += suballoc.size;
8086  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8087  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8088 
8089  // 3. Prepare for next iteration.
8090  lastOffset = suballoc.offset + suballoc.size;
8091  ++nextAlloc2ndIndex;
8092  }
8093  // We are at the end.
8094  else
8095  {
8096  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8097  if(lastOffset < freeSpace2ndTo1stEnd)
8098  {
8099  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8100  ++outInfo.unusedRangeCount;
8101  outInfo.unusedBytes += unusedRangeSize;
8102  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8103  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8104  }
8105 
8106  // End of loop.
8107  lastOffset = freeSpace2ndTo1stEnd;
8108  }
8109  }
8110  }
8111 
8112  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8113  const VkDeviceSize freeSpace1stTo2ndEnd =
8114  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8115  while(lastOffset < freeSpace1stTo2ndEnd)
8116  {
8117  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8118  while(nextAlloc1stIndex < suballoc1stCount &&
8119  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8120  {
8121  ++nextAlloc1stIndex;
8122  }
8123 
8124  // Found non-null allocation.
8125  if(nextAlloc1stIndex < suballoc1stCount)
8126  {
8127  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8128 
8129  // 1. Process free space before this allocation.
8130  if(lastOffset < suballoc.offset)
8131  {
8132  // There is free space from lastOffset to suballoc.offset.
8133  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8134  ++outInfo.unusedRangeCount;
8135  outInfo.unusedBytes += unusedRangeSize;
8136  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8137  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8138  }
8139 
8140  // 2. Process this allocation.
8141  // There is allocation with suballoc.offset, suballoc.size.
8142  outInfo.usedBytes += suballoc.size;
8143  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8144  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8145 
8146  // 3. Prepare for next iteration.
8147  lastOffset = suballoc.offset + suballoc.size;
8148  ++nextAlloc1stIndex;
8149  }
8150  // We are at the end.
8151  else
8152  {
8153  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8154  if(lastOffset < freeSpace1stTo2ndEnd)
8155  {
8156  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8157  ++outInfo.unusedRangeCount;
8158  outInfo.unusedBytes += unusedRangeSize;
8159  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8160  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8161  }
8162 
8163  // End of loop.
8164  lastOffset = freeSpace1stTo2ndEnd;
8165  }
8166  }
8167 
8168  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8169  {
8170  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8171  while(lastOffset < size)
8172  {
8173  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8174  while(nextAlloc2ndIndex != SIZE_MAX &&
8175  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8176  {
8177  --nextAlloc2ndIndex;
8178  }
8179 
8180  // Found non-null allocation.
8181  if(nextAlloc2ndIndex != SIZE_MAX)
8182  {
8183  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8184 
8185  // 1. Process free space before this allocation.
8186  if(lastOffset < suballoc.offset)
8187  {
8188  // There is free space from lastOffset to suballoc.offset.
8189  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8190  ++outInfo.unusedRangeCount;
8191  outInfo.unusedBytes += unusedRangeSize;
8192  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8193  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8194  }
8195 
8196  // 2. Process this allocation.
8197  // There is allocation with suballoc.offset, suballoc.size.
8198  outInfo.usedBytes += suballoc.size;
8199  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8200  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8201 
8202  // 3. Prepare for next iteration.
8203  lastOffset = suballoc.offset + suballoc.size;
8204  --nextAlloc2ndIndex;
8205  }
8206  // We are at the end.
8207  else
8208  {
8209  // There is free space from lastOffset to size.
8210  if(lastOffset < size)
8211  {
8212  const VkDeviceSize unusedRangeSize = size - lastOffset;
8213  ++outInfo.unusedRangeCount;
8214  outInfo.unusedBytes += unusedRangeSize;
8215  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8216  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8217  }
8218 
8219  // End of loop.
8220  lastOffset = size;
8221  }
8222  }
8223  }
8224 
8225  outInfo.unusedBytes = size - outInfo.usedBytes;
8226 }
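// [Editor's note] CalcAllocationStatInfo(), AddPoolStats() and PrintDetailedMap()
// below all repeat the same traversal: 2nd vector first (ring-buffer part), then
// 1st vector, then 2nd vector top-down (double-stack part), visiting used ranges
// and the gaps between them. A minimal sketch of that pattern over one vector,
// assuming hypothetical callbacks onUsed/onUnused (not part of the library):
#if 0
template<typename OnUsed, typename OnUnused>
static void VmaVisitRanges(const VmaSuballocation* suballocs, size_t count,
    VkDeviceSize beginOffset, VkDeviceSize endOffset,
    OnUsed onUsed, OnUnused onUnused)
{
    VkDeviceSize lastOffset = beginOffset;
    for(size_t i = 0; i < count; ++i)
    {
        if(suballocs[i].hAllocation == VK_NULL_HANDLE)
        {
            continue; // Null item: its space is part of a gap, reported below.
        }
        if(lastOffset < suballocs[i].offset)
        {
            onUnused(lastOffset, suballocs[i].offset - lastOffset); // Gap before this allocation.
        }
        onUsed(suballocs[i].offset, suballocs[i].size);
        lastOffset = suballocs[i].offset + suballocs[i].size;
    }
    if(lastOffset < endOffset)
    {
        onUnused(lastOffset, endOffset - lastOffset); // Trailing gap.
    }
}
#endif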
8227 
8228 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8229 {
8230  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8231  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8232  const VkDeviceSize size = GetSize();
8233  const size_t suballoc1stCount = suballocations1st.size();
8234  const size_t suballoc2ndCount = suballocations2nd.size();
8235 
8236  inoutStats.size += size;
8237 
8238  VkDeviceSize lastOffset = 0;
8239 
8240  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8241  {
8242  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8243  size_t nextAlloc2ndIndex = 0;
8244  while(lastOffset < freeSpace2ndTo1stEnd)
8245  {
8246  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8247  while(nextAlloc2ndIndex < suballoc2ndCount &&
8248  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8249  {
8250  ++nextAlloc2ndIndex;
8251  }
8252 
8253  // Found non-null allocation.
8254  if(nextAlloc2ndIndex < suballoc2ndCount)
8255  {
8256  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8257 
8258  // 1. Process free space before this allocation.
8259  if(lastOffset < suballoc.offset)
8260  {
8261  // There is free space from lastOffset to suballoc.offset.
8262  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8263  inoutStats.unusedSize += unusedRangeSize;
8264  ++inoutStats.unusedRangeCount;
8265  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8266  }
8267 
8268  // 2. Process this allocation.
8269  // There is allocation with suballoc.offset, suballoc.size.
8270  ++inoutStats.allocationCount;
8271 
8272  // 3. Prepare for next iteration.
8273  lastOffset = suballoc.offset + suballoc.size;
8274  ++nextAlloc2ndIndex;
8275  }
8276  // We are at the end.
8277  else
8278  {
8279  if(lastOffset < freeSpace2ndTo1stEnd)
8280  {
8281  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8282  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8283  inoutStats.unusedSize += unusedRangeSize;
8284  ++inoutStats.unusedRangeCount;
8285  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8286  }
8287 
8288  // End of loop.
8289  lastOffset = freeSpace2ndTo1stEnd;
8290  }
8291  }
8292  }
8293 
8294  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8295  const VkDeviceSize freeSpace1stTo2ndEnd =
8296  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8297  while(lastOffset < freeSpace1stTo2ndEnd)
8298  {
8299  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8300  while(nextAlloc1stIndex < suballoc1stCount &&
8301  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8302  {
8303  ++nextAlloc1stIndex;
8304  }
8305 
8306  // Found non-null allocation.
8307  if(nextAlloc1stIndex < suballoc1stCount)
8308  {
8309  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8310 
8311  // 1. Process free space before this allocation.
8312  if(lastOffset < suballoc.offset)
8313  {
8314  // There is free space from lastOffset to suballoc.offset.
8315  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8316  inoutStats.unusedSize += unusedRangeSize;
8317  ++inoutStats.unusedRangeCount;
8318  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8319  }
8320 
8321  // 2. Process this allocation.
8322  // There is allocation with suballoc.offset, suballoc.size.
8323  ++inoutStats.allocationCount;
8324 
8325  // 3. Prepare for next iteration.
8326  lastOffset = suballoc.offset + suballoc.size;
8327  ++nextAlloc1stIndex;
8328  }
8329  // We are at the end.
8330  else
8331  {
8332  if(lastOffset < freeSpace1stTo2ndEnd)
8333  {
8334  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8335  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8336  inoutStats.unusedSize += unusedRangeSize;
8337  ++inoutStats.unusedRangeCount;
8338  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8339  }
8340 
8341  // End of loop.
8342  lastOffset = freeSpace1stTo2ndEnd;
8343  }
8344  }
8345 
8346  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8347  {
8348  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8349  while(lastOffset < size)
8350  {
8351  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8352  while(nextAlloc2ndIndex != SIZE_MAX &&
8353  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8354  {
8355  --nextAlloc2ndIndex;
8356  }
8357 
8358  // Found non-null allocation.
8359  if(nextAlloc2ndIndex != SIZE_MAX)
8360  {
8361  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8362 
8363  // 1. Process free space before this allocation.
8364  if(lastOffset < suballoc.offset)
8365  {
8366  // There is free space from lastOffset to suballoc.offset.
8367  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8368  inoutStats.unusedSize += unusedRangeSize;
8369  ++inoutStats.unusedRangeCount;
8370  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8371  }
8372 
8373  // 2. Process this allocation.
8374  // There is allocation with suballoc.offset, suballoc.size.
8375  ++inoutStats.allocationCount;
8376 
8377  // 3. Prepare for next iteration.
8378  lastOffset = suballoc.offset + suballoc.size;
8379  --nextAlloc2ndIndex;
8380  }
8381  // We are at the end.
8382  else
8383  {
8384  if(lastOffset < size)
8385  {
8386  // There is free space from lastOffset to size.
8387  const VkDeviceSize unusedRangeSize = size - lastOffset;
8388  inoutStats.unusedSize += unusedRangeSize;
8389  ++inoutStats.unusedRangeCount;
8390  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8391  }
8392 
8393  // End of loop.
8394  lastOffset = size;
8395  }
8396  }
8397  }
8398 }
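// [Editor's note] A minimal usage sketch, not part of the header: AddPoolStats()
// feeds vmaGetPoolStats(), the public way to read these numbers ('allocator' and
// 'pool' assumed created elsewhere):
#if 0
VmaPoolStats poolStats = {};
vmaGetPoolStats(allocator, pool, &poolStats);
printf("Used %llu of %llu bytes in %llu allocations; largest free range: %llu bytes.\n",
    (unsigned long long)(poolStats.size - poolStats.unusedSize),
    (unsigned long long)poolStats.size,
    (unsigned long long)poolStats.allocationCount,
    (unsigned long long)poolStats.unusedRangeSizeMax);
#endif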
8399 
8400 #if VMA_STATS_STRING_ENABLED
8401 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8402 {
8403  const VkDeviceSize size = GetSize();
8404  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8405  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8406  const size_t suballoc1stCount = suballocations1st.size();
8407  const size_t suballoc2ndCount = suballocations2nd.size();
8408 
8409  // FIRST PASS
8410 
8411  size_t unusedRangeCount = 0;
8412  VkDeviceSize usedBytes = 0;
8413 
8414  VkDeviceSize lastOffset = 0;
8415 
8416  size_t alloc2ndCount = 0;
8417  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8418  {
8419  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8420  size_t nextAlloc2ndIndex = 0;
8421  while(lastOffset < freeSpace2ndTo1stEnd)
8422  {
8423  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8424  while(nextAlloc2ndIndex < suballoc2ndCount &&
8425  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8426  {
8427  ++nextAlloc2ndIndex;
8428  }
8429 
8430  // Found non-null allocation.
8431  if(nextAlloc2ndIndex < suballoc2ndCount)
8432  {
8433  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8434 
8435  // 1. Process free space before this allocation.
8436  if(lastOffset < suballoc.offset)
8437  {
8438  // There is free space from lastOffset to suballoc.offset.
8439  ++unusedRangeCount;
8440  }
8441 
8442  // 2. Process this allocation.
8443  // There is allocation with suballoc.offset, suballoc.size.
8444  ++alloc2ndCount;
8445  usedBytes += suballoc.size;
8446 
8447  // 3. Prepare for next iteration.
8448  lastOffset = suballoc.offset + suballoc.size;
8449  ++nextAlloc2ndIndex;
8450  }
8451  // We are at the end.
8452  else
8453  {
8454  if(lastOffset < freeSpace2ndTo1stEnd)
8455  {
8456  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8457  ++unusedRangeCount;
8458  }
8459 
8460  // End of loop.
8461  lastOffset = freeSpace2ndTo1stEnd;
8462  }
8463  }
8464  }
8465 
8466  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8467  size_t alloc1stCount = 0;
8468  const VkDeviceSize freeSpace1stTo2ndEnd =
8469  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8470  while(lastOffset < freeSpace1stTo2ndEnd)
8471  {
8472  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8473  while(nextAlloc1stIndex < suballoc1stCount &&
8474  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8475  {
8476  ++nextAlloc1stIndex;
8477  }
8478 
8479  // Found non-null allocation.
8480  if(nextAlloc1stIndex < suballoc1stCount)
8481  {
8482  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8483 
8484  // 1. Process free space before this allocation.
8485  if(lastOffset < suballoc.offset)
8486  {
8487  // There is free space from lastOffset to suballoc.offset.
8488  ++unusedRangeCount;
8489  }
8490 
8491  // 2. Process this allocation.
8492  // There is allocation with suballoc.offset, suballoc.size.
8493  ++alloc1stCount;
8494  usedBytes += suballoc.size;
8495 
8496  // 3. Prepare for next iteration.
8497  lastOffset = suballoc.offset + suballoc.size;
8498  ++nextAlloc1stIndex;
8499  }
8500  // We are at the end.
8501  else
8502  {
8503  if(lastOffset < freeSpace1stTo2ndEnd)
8504  {
8505  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8506  ++unusedRangeCount;
8507  }
8508 
8509  // End of loop.
8510  lastOffset = freeSpace1stTo2ndEnd;
8511  }
8512  }
8513 
8514  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8515  {
8516  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8517  while(lastOffset < size)
8518  {
8519  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8520  while(nextAlloc2ndIndex != SIZE_MAX &&
8521  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8522  {
8523  --nextAlloc2ndIndex;
8524  }
8525 
8526  // Found non-null allocation.
8527  if(nextAlloc2ndIndex != SIZE_MAX)
8528  {
8529  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8530 
8531  // 1. Process free space before this allocation.
8532  if(lastOffset < suballoc.offset)
8533  {
8534  // There is free space from lastOffset to suballoc.offset.
8535  ++unusedRangeCount;
8536  }
8537 
8538  // 2. Process this allocation.
8539  // There is allocation with suballoc.offset, suballoc.size.
8540  ++alloc2ndCount;
8541  usedBytes += suballoc.size;
8542 
8543  // 3. Prepare for next iteration.
8544  lastOffset = suballoc.offset + suballoc.size;
8545  --nextAlloc2ndIndex;
8546  }
8547  // We are at the end.
8548  else
8549  {
8550  if(lastOffset < size)
8551  {
8552  // There is free space from lastOffset to size.
8553  ++unusedRangeCount;
8554  }
8555 
8556  // End of loop.
8557  lastOffset = size;
8558  }
8559  }
8560  }
8561 
8562  const VkDeviceSize unusedBytes = size - usedBytes;
8563  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8564 
8565  // SECOND PASS
8566  lastOffset = 0;
8567 
8568  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8569  {
8570  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8571  size_t nextAlloc2ndIndex = 0;
8572  while(lastOffset < freeSpace2ndTo1stEnd)
8573  {
8574  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8575  while(nextAlloc2ndIndex < suballoc2ndCount &&
8576  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8577  {
8578  ++nextAlloc2ndIndex;
8579  }
8580 
8581  // Found non-null allocation.
8582  if(nextAlloc2ndIndex < suballoc2ndCount)
8583  {
8584  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8585 
8586  // 1. Process free space before this allocation.
8587  if(lastOffset < suballoc.offset)
8588  {
8589  // There is free space from lastOffset to suballoc.offset.
8590  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8591  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8592  }
8593 
8594  // 2. Process this allocation.
8595  // There is allocation with suballoc.offset, suballoc.size.
8596  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8597 
8598  // 3. Prepare for next iteration.
8599  lastOffset = suballoc.offset + suballoc.size;
8600  ++nextAlloc2ndIndex;
8601  }
8602  // We are at the end.
8603  else
8604  {
8605  if(lastOffset < freeSpace2ndTo1stEnd)
8606  {
8607  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8608  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8609  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8610  }
8611 
8612  // End of loop.
8613  lastOffset = freeSpace2ndTo1stEnd;
8614  }
8615  }
8616  }
8617 
8618  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8619  while(lastOffset < freeSpace1stTo2ndEnd)
8620  {
8621  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8622  while(nextAlloc1stIndex < suballoc1stCount &&
8623  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8624  {
8625  ++nextAlloc1stIndex;
8626  }
8627 
8628  // Found non-null allocation.
8629  if(nextAlloc1stIndex < suballoc1stCount)
8630  {
8631  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8632 
8633  // 1. Process free space before this allocation.
8634  if(lastOffset < suballoc.offset)
8635  {
8636  // There is free space from lastOffset to suballoc.offset.
8637  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8638  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8639  }
8640 
8641  // 2. Process this allocation.
8642  // There is allocation with suballoc.offset, suballoc.size.
8643  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8644 
8645  // 3. Prepare for next iteration.
8646  lastOffset = suballoc.offset + suballoc.size;
8647  ++nextAlloc1stIndex;
8648  }
8649  // We are at the end.
8650  else
8651  {
8652  if(lastOffset < freeSpace1stTo2ndEnd)
8653  {
8654  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8655  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8656  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8657  }
8658 
8659  // End of loop.
8660  lastOffset = freeSpace1stTo2ndEnd;
8661  }
8662  }
8663 
8664  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8665  {
8666  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8667  while(lastOffset < size)
8668  {
8669  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8670  while(nextAlloc2ndIndex != SIZE_MAX &&
8671  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8672  {
8673  --nextAlloc2ndIndex;
8674  }
8675 
8676  // Found non-null allocation.
8677  if(nextAlloc2ndIndex != SIZE_MAX)
8678  {
8679  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8680 
8681  // 1. Process free space before this allocation.
8682  if(lastOffset < suballoc.offset)
8683  {
8684  // There is free space from lastOffset to suballoc.offset.
8685  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8686  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8687  }
8688 
8689  // 2. Process this allocation.
8690  // There is allocation with suballoc.offset, suballoc.size.
8691  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8692 
8693  // 3. Prepare for next iteration.
8694  lastOffset = suballoc.offset + suballoc.size;
8695  --nextAlloc2ndIndex;
8696  }
8697  // We are at the end.
8698  else
8699  {
8700  if(lastOffset < size)
8701  {
8702  // There is free space from lastOffset to size.
8703  const VkDeviceSize unusedRangeSize = size - lastOffset;
8704  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8705  }
8706 
8707  // End of loop.
8708  lastOffset = size;
8709  }
8710  }
8711  }
8712 
8713  PrintDetailedMap_End(json);
8714 }
8715 #endif // #if VMA_STATS_STRING_ENABLED
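// [Editor's note] A minimal sketch of how the detailed map above reaches the user,
// not part of the header ('allocator' assumed created elsewhere). Passing VK_TRUE
// requests the per-allocation JSON produced by PrintDetailedMap():
#if 0
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE);
// statsString now holds JSON that includes the detailed map; log or save it.
vmaFreeStatsString(allocator, statsString);
#endif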
8716 
8717 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8718  uint32_t currentFrameIndex,
8719  uint32_t frameInUseCount,
8720  VkDeviceSize bufferImageGranularity,
8721  VkDeviceSize allocSize,
8722  VkDeviceSize allocAlignment,
8723  bool upperAddress,
8724  VmaSuballocationType allocType,
8725  bool canMakeOtherLost,
8726  uint32_t strategy,
8727  VmaAllocationRequest* pAllocationRequest)
8728 {
8729  VMA_ASSERT(allocSize > 0);
8730  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8731  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8732  VMA_HEAVY_ASSERT(Validate());
8733 
8734  const VkDeviceSize size = GetSize();
8735  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8736  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8737 
8738  if(upperAddress)
8739  {
8740  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8741  {
8742  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8743  return false;
8744  }
8745 
8746  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8747  if(allocSize > size)
8748  {
8749  return false;
8750  }
8751  VkDeviceSize resultBaseOffset = size - allocSize;
8752  if(!suballocations2nd.empty())
8753  {
8754  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8755  resultBaseOffset = lastSuballoc.offset - allocSize;
8756  if(allocSize > lastSuballoc.offset)
8757  {
8758  return false;
8759  }
8760  }
8761 
8762  // Start from offset equal to end of free space.
8763  VkDeviceSize resultOffset = resultBaseOffset;
8764 
8765  // Apply VMA_DEBUG_MARGIN at the end.
8766  if(VMA_DEBUG_MARGIN > 0)
8767  {
8768  if(resultOffset < VMA_DEBUG_MARGIN)
8769  {
8770  return false;
8771  }
8772  resultOffset -= VMA_DEBUG_MARGIN;
8773  }
8774 
8775  // Apply alignment.
8776  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8777 
8778  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8779  // Make bigger alignment if necessary.
8780  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8781  {
8782  bool bufferImageGranularityConflict = false;
8783  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8784  {
8785  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8786  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8787  {
8788  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8789  {
8790  bufferImageGranularityConflict = true;
8791  break;
8792  }
8793  }
8794  else
8795  // Already on previous page.
8796  break;
8797  }
8798  if(bufferImageGranularityConflict)
8799  {
8800  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8801  }
8802  }
8803 
8804  // There is enough free space.
8805  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8806  suballocations1st.back().offset + suballocations1st.back().size :
8807  0;
8808  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8809  {
8810  // Check previous suballocations for BufferImageGranularity conflicts.
8811  // If conflict exists, allocation cannot be made here.
8812  if(bufferImageGranularity > 1)
8813  {
8814  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8815  {
8816  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8817  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8818  {
8819  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8820  {
8821  return false;
8822  }
8823  }
8824  else
8825  {
8826  // Already on next page.
8827  break;
8828  }
8829  }
8830  }
8831 
8832  // All tests passed: Success.
8833  pAllocationRequest->offset = resultOffset;
8834  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8835  pAllocationRequest->sumItemSize = 0;
8836  // pAllocationRequest->item unused.
8837  pAllocationRequest->itemsToMakeLostCount = 0;
8838  return true;
8839  }
8840  }
8841  else // !upperAddress
8842  {
8843  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8844  {
8845  // Try to allocate at the end of 1st vector.
8846 
8847  VkDeviceSize resultBaseOffset = 0;
8848  if(!suballocations1st.empty())
8849  {
8850  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8851  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8852  }
8853 
8854  // Start from offset equal to beginning of free space.
8855  VkDeviceSize resultOffset = resultBaseOffset;
8856 
8857  // Apply VMA_DEBUG_MARGIN at the beginning.
8858  if(VMA_DEBUG_MARGIN > 0)
8859  {
8860  resultOffset += VMA_DEBUG_MARGIN;
8861  }
8862 
8863  // Apply alignment.
8864  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8865 
8866  // Check previous suballocations for BufferImageGranularity conflicts.
8867  // Make bigger alignment if necessary.
8868  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8869  {
8870  bool bufferImageGranularityConflict = false;
8871  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8872  {
8873  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8874  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8875  {
8876  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8877  {
8878  bufferImageGranularityConflict = true;
8879  break;
8880  }
8881  }
8882  else
8883  // Already on previous page.
8884  break;
8885  }
8886  if(bufferImageGranularityConflict)
8887  {
8888  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8889  }
8890  }
8891 
8892  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8893  suballocations2nd.back().offset : size;
8894 
8895  // There is enough free space at the end after alignment.
8896  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8897  {
8898  // Check next suballocations for BufferImageGranularity conflicts.
8899  // If conflict exists, allocation cannot be made here.
8900  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8901  {
8902  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8903  {
8904  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8905  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8906  {
8907  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8908  {
8909  return false;
8910  }
8911  }
8912  else
8913  {
8914  // Already on previous page.
8915  break;
8916  }
8917  }
8918  }
8919 
8920  // All tests passed: Success.
8921  pAllocationRequest->offset = resultOffset;
8922  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8923  pAllocationRequest->sumItemSize = 0;
8924  // pAllocationRequest->item unused.
8925  pAllocationRequest->itemsToMakeLostCount = 0;
8926  return true;
8927  }
8928  }
8929 
8930  // Wrap-around to end of 2nd vector. Try to allocate there, treating the
8931  // beginning of 1st vector as the end of the free space.
8932  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8933  {
8934  VMA_ASSERT(!suballocations1st.empty());
8935 
8936  VkDeviceSize resultBaseOffset = 0;
8937  if(!suballocations2nd.empty())
8938  {
8939  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8940  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8941  }
8942 
8943  // Start from offset equal to beginning of free space.
8944  VkDeviceSize resultOffset = resultBaseOffset;
8945 
8946  // Apply VMA_DEBUG_MARGIN at the beginning.
8947  if(VMA_DEBUG_MARGIN > 0)
8948  {
8949  resultOffset += VMA_DEBUG_MARGIN;
8950  }
8951 
8952  // Apply alignment.
8953  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8954 
8955  // Check previous suballocations for BufferImageGranularity conflicts.
8956  // Make bigger alignment if necessary.
8957  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8958  {
8959  bool bufferImageGranularityConflict = false;
8960  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8961  {
8962  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8963  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8964  {
8965  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8966  {
8967  bufferImageGranularityConflict = true;
8968  break;
8969  }
8970  }
8971  else
8972  // Already on previous page.
8973  break;
8974  }
8975  if(bufferImageGranularityConflict)
8976  {
8977  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8978  }
8979  }
8980 
8981  pAllocationRequest->itemsToMakeLostCount = 0;
8982  pAllocationRequest->sumItemSize = 0;
8983  size_t index1st = m_1stNullItemsBeginCount;
8984 
8985  if(canMakeOtherLost)
8986  {
8987  while(index1st < suballocations1st.size() &&
8988  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8989  {
8990  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8991  const VmaSuballocation& suballoc = suballocations1st[index1st];
8992  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8993  {
8994  // No problem.
8995  }
8996  else
8997  {
8998  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8999  if(suballoc.hAllocation->CanBecomeLost() &&
9000  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9001  {
9002  ++pAllocationRequest->itemsToMakeLostCount;
9003  pAllocationRequest->sumItemSize += suballoc.size;
9004  }
9005  else
9006  {
9007  return false;
9008  }
9009  }
9010  ++index1st;
9011  }
9012 
9013  // Check next suballocations for BufferImageGranularity conflicts.
9014  // If conflict exists, we must mark more allocations lost or fail.
9015  if(bufferImageGranularity > 1)
9016  {
9017  while(index1st < suballocations1st.size())
9018  {
9019  const VmaSuballocation& suballoc = suballocations1st[index1st];
9020  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9021  {
9022  if(suballoc.hAllocation != VK_NULL_HANDLE)
9023  {
9024  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9025  if(suballoc.hAllocation->CanBecomeLost() &&
9026  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9027  {
9028  ++pAllocationRequest->itemsToMakeLostCount;
9029  pAllocationRequest->sumItemSize += suballoc.size;
9030  }
9031  else
9032  {
9033  return false;
9034  }
9035  }
9036  }
9037  else
9038  {
9039  // Already on next page.
9040  break;
9041  }
9042  ++index1st;
9043  }
9044  }
9045  }
9046 
9047  // There is enough free space at the end after alignment.
9048  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9049  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9050  {
9051  // Check next suballocations for BufferImageGranularity conflicts.
9052  // If conflict exists, allocation cannot be made here.
9053  if(bufferImageGranularity > 1)
9054  {
9055  for(size_t nextSuballocIndex = index1st;
9056  nextSuballocIndex < suballocations1st.size();
9057  nextSuballocIndex++)
9058  {
9059  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9060  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9061  {
9062  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9063  {
9064  return false;
9065  }
9066  }
9067  else
9068  {
9069  // Already on next page.
9070  break;
9071  }
9072  }
9073  }
9074 
9075  // All tests passed: Success.
9076  pAllocationRequest->offset = resultOffset;
9077  pAllocationRequest->sumFreeSize =
9078  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9079  - resultBaseOffset
9080  - pAllocationRequest->sumItemSize;
9081  // pAllocationRequest->item unused.
9082  return true;
9083  }
9084  }
9085  }
9086 
9087  return false;
9088 }
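// [Editor's note] A minimal sketch of driving the linear modes implemented above
// from the public API, not part of the header; memTypeIndex and sizes are
// assumptions:
#if 0
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex; // Assumed found via vmaFindMemoryTypeIndex().
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
poolCreateInfo.blockSize = 64ull * 1024 * 1024;
poolCreateInfo.maxBlockCount = 1; // Linear algorithm uses a single block.
VmaPool pool;
vmaCreatePool(allocator, &poolCreateInfo, &pool);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;
// Default: lower address (1st vector - stack / ring buffer, the !upperAddress path above).
// With this flag: upper address (2nd vector - double stack, the upperAddress path above):
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
#endif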
9089 
9090 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9091  uint32_t currentFrameIndex,
9092  uint32_t frameInUseCount,
9093  VmaAllocationRequest* pAllocationRequest)
9094 {
9095  if(pAllocationRequest->itemsToMakeLostCount == 0)
9096  {
9097  return true;
9098  }
9099 
9100  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9101 
9102  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9103  size_t index1st = m_1stNullItemsBeginCount;
9104  size_t madeLostCount = 0;
9105  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9106  {
9107  VMA_ASSERT(index1st < suballocations1st.size());
9108  VmaSuballocation& suballoc = suballocations1st[index1st];
9109  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9110  {
9111  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9112  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
9113  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9114  {
9115  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9116  suballoc.hAllocation = VK_NULL_HANDLE;
9117  m_SumFreeSize += suballoc.size;
9118  ++m_1stNullItemsMiddleCount;
9119  ++madeLostCount;
9120  }
9121  else
9122  {
9123  return false;
9124  }
9125  }
9126  ++index1st;
9127  }
9128 
9129  CleanupAfterFree();
9130  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
9131 
9132  return true;
9133 }
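// [Editor's note] A minimal sketch of the lost-allocations protocol this function
// serves, not part of the header; 'frameIndex' and 'alloc' are assumptions:
#if 0
// Once per frame:
vmaSetCurrentFrameIndex(allocator, frameIndex);
// Before using an allocation created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT:
if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
{
    // The allocation was made lost to satisfy another request - recreate the resource.
}
#endif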
9134 
9135 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9136 {
9137  uint32_t lostAllocationCount = 0;
9138 
9139  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9140  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9141  {
9142  VmaSuballocation& suballoc = suballocations1st[i];
9143  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9144  suballoc.hAllocation->CanBecomeLost() &&
9145  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9146  {
9147  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9148  suballoc.hAllocation = VK_NULL_HANDLE;
9149  ++m_1stNullItemsMiddleCount;
9150  m_SumFreeSize += suballoc.size;
9151  ++lostAllocationCount;
9152  }
9153  }
9154 
9155  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9156  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9157  {
9158  VmaSuballocation& suballoc = suballocations2nd[i];
9159  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9160  suballoc.hAllocation->CanBecomeLost() &&
9161  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9162  {
9163  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9164  suballoc.hAllocation = VK_NULL_HANDLE;
9165  ++m_2ndNullItemsCount;
9166  ++lostAllocationCount;
9167  }
9168  }
9169 
9170  if(lostAllocationCount)
9171  {
9172  CleanupAfterFree();
9173  }
9174 
9175  return lostAllocationCount;
9176 }
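// [Editor's note] A minimal sketch of the public entry point that reaches
// MakeAllocationsLost() for a custom pool, not part of the header:
#if 0
size_t lostCount = 0;
vmaMakePoolAllocationsLost(allocator, pool, &lostCount); // 'pool' assumed created elsewhere.
#endif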
9177 
9178 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9179 {
9180  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9181  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9182  {
9183  const VmaSuballocation& suballoc = suballocations1st[i];
9184  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9185  {
9186  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9187  {
9188  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9189  return VK_ERROR_VALIDATION_FAILED_EXT;
9190  }
9191  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9192  {
9193  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9194  return VK_ERROR_VALIDATION_FAILED_EXT;
9195  }
9196  }
9197  }
9198 
9199  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9200  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9201  {
9202  const VmaSuballocation& suballoc = suballocations2nd[i];
9203  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9204  {
9205  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9206  {
9207  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9208  return VK_ERROR_VALIDATION_FAILED_EXT;
9209  }
9210  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9211  {
9212  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9213  return VK_ERROR_VALIDATION_FAILED_EXT;
9214  }
9215  }
9216  }
9217 
9218  return VK_SUCCESS;
9219 }
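// [Editor's note] A minimal sketch of enabling the corruption detection that
// CheckCorruption() implements, not part of the header:
#if 0
#define VMA_DEBUG_MARGIN 16           // Surround allocations with a margin filled with a magic value.
#define VMA_DEBUG_DETECT_CORRUPTION 1 // Validate those magic values on demand.
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"
// Later, for all memory types (only HOST_VISIBLE ones can be checked):
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
// VK_ERROR_VALIDATION_FAILED_EXT here means a margin was overwritten.
#endif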
9220 
9221 void VmaBlockMetadata_Linear::Alloc(
9222  const VmaAllocationRequest& request,
9223  VmaSuballocationType type,
9224  VkDeviceSize allocSize,
9225  bool upperAddress,
9226  VmaAllocation hAllocation)
9227 {
9228  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9229 
9230  if(upperAddress)
9231  {
9232  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9233  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9234  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9235  suballocations2nd.push_back(newSuballoc);
9236  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9237  }
9238  else
9239  {
9240  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9241 
9242  // First allocation.
9243  if(suballocations1st.empty())
9244  {
9245  suballocations1st.push_back(newSuballoc);
9246  }
9247  else
9248  {
9249  // New allocation at the end of 1st vector.
9250  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9251  {
9252  // Check if it fits before the end of the block.
9253  VMA_ASSERT(request.offset + allocSize <= GetSize());
9254  suballocations1st.push_back(newSuballoc);
9255  }
9256  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
9257  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
9258  {
9259  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9260 
9261  switch(m_2ndVectorMode)
9262  {
9263  case SECOND_VECTOR_EMPTY:
9264  // First allocation from second part ring buffer.
9265  VMA_ASSERT(suballocations2nd.empty());
9266  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
9267  break;
9268  case SECOND_VECTOR_RING_BUFFER:
9269  // 2-part ring buffer is already started.
9270  VMA_ASSERT(!suballocations2nd.empty());
9271  break;
9272  case SECOND_VECTOR_DOUBLE_STACK:
9273  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
9274  break;
9275  default:
9276  VMA_ASSERT(0);
9277  }
9278 
9279  suballocations2nd.push_back(newSuballoc);
9280  }
9281  else
9282  {
9283  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
9284  }
9285  }
9286  }
9287 
9288  m_SumFreeSize -= newSuballoc.size;
9289 }
9290 
9291 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9292 {
9293  FreeAtOffset(allocation->GetOffset());
9294 }
9295 
9296 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
9297 {
9298  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9299  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9300 
9301  if(!suballocations1st.empty())
9302  {
9303  // First allocation: Mark it as next empty at the beginning.
9304  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9305  if(firstSuballoc.offset == offset)
9306  {
9307  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9308  firstSuballoc.hAllocation = VK_NULL_HANDLE;
9309  m_SumFreeSize += firstSuballoc.size;
9310  ++m_1stNullItemsBeginCount;
9311  CleanupAfterFree();
9312  return;
9313  }
9314  }
9315 
9316  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
9317  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
9318  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9319  {
9320  VmaSuballocation& lastSuballoc = suballocations2nd.back();
9321  if(lastSuballoc.offset == offset)
9322  {
9323  m_SumFreeSize += lastSuballoc.size;
9324  suballocations2nd.pop_back();
9325  CleanupAfterFree();
9326  return;
9327  }
9328  }
9329  // Last allocation in 1st vector.
9330  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
9331  {
9332  VmaSuballocation& lastSuballoc = suballocations1st.back();
9333  if(lastSuballoc.offset == offset)
9334  {
9335  m_SumFreeSize += lastSuballoc.size;
9336  suballocations1st.pop_back();
9337  CleanupAfterFree();
9338  return;
9339  }
9340  }
9341 
9342  // Item from the middle of 1st vector.
9343  {
9344  VmaSuballocation refSuballoc;
9345  refSuballoc.offset = offset;
9346  // Rest of the members stay intentionally uninitialized for better performance.
9347  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
9348  suballocations1st.begin() + m_1stNullItemsBeginCount,
9349  suballocations1st.end(),
9350  refSuballoc);
9351  if(it != suballocations1st.end())
9352  {
9353  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9354  it->hAllocation = VK_NULL_HANDLE;
9355  ++m_1stNullItemsMiddleCount;
9356  m_SumFreeSize += it->size;
9357  CleanupAfterFree();
9358  return;
9359  }
9360  }
9361 
9362  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
9363  {
9364  // Item from the middle of 2nd vector.
9365  VmaSuballocation refSuballoc;
9366  refSuballoc.offset = offset;
9367  // Rest of the members stay intentionally uninitialized for better performance.
9368  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
9369  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
9370  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
9371  if(it != suballocations2nd.end())
9372  {
9373  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9374  it->hAllocation = VK_NULL_HANDLE;
9375  ++m_2ndNullItemsCount;
9376  m_SumFreeSize += it->size;
9377  CleanupAfterFree();
9378  return;
9379  }
9380  }
9381 
9382  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
9383 }
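// [Editor's note on the two comparators above] In ring-buffer mode,
// suballocations2nd is sorted by increasing offset, so the binary search uses
// VmaSuballocationOffsetLess. In double-stack mode, allocations are pushed from
// the top of the block downwards, so the vector is sorted by decreasing offset
// and VmaSuballocationOffsetGreater is required instead.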
9384 
9385 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9386 {
9387  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9388  const size_t suballocCount = AccessSuballocations1st().size();
9389  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9390 }
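// [Editor's worked example for the heuristic above] With 100 suballocations of
// which 60 are null: 60 * 2 == 120 >= (100 - 60) * 3 == 120, so compaction runs;
// with only 50 null items: 100 < 150, so it does not. In other words, 1st is
// compacted once null items reach 1.5x the live items, and only when the vector
// is large enough (> 32) for the copying to pay off.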
9391 
9392 void VmaBlockMetadata_Linear::CleanupAfterFree()
9393 {
9394  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9395  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9396 
9397  if(IsEmpty())
9398  {
9399  suballocations1st.clear();
9400  suballocations2nd.clear();
9401  m_1stNullItemsBeginCount = 0;
9402  m_1stNullItemsMiddleCount = 0;
9403  m_2ndNullItemsCount = 0;
9404  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9405  }
9406  else
9407  {
9408  const size_t suballoc1stCount = suballocations1st.size();
9409  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9410  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
9411 
9412  // Find more null items at the beginning of 1st vector.
9413  while(m_1stNullItemsBeginCount < suballoc1stCount &&
9414  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9415  {
9416  ++m_1stNullItemsBeginCount;
9417  --m_1stNullItemsMiddleCount;
9418  }
9419 
9420  // Find more null items at the end of 1st vector.
9421  while(m_1stNullItemsMiddleCount > 0 &&
9422  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
9423  {
9424  --m_1stNullItemsMiddleCount;
9425  suballocations1st.pop_back();
9426  }
9427 
9428  // Find more null items at the end of 2nd vector.
9429  while(m_2ndNullItemsCount > 0 &&
9430  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
9431  {
9432  --m_2ndNullItemsCount;
9433  suballocations2nd.pop_back();
9434  }
9435 
9436  if(ShouldCompact1st())
9437  {
9438  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
9439  size_t srcIndex = m_1stNullItemsBeginCount;
9440  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
9441  {
9442  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
9443  {
9444  ++srcIndex;
9445  }
9446  if(dstIndex != srcIndex)
9447  {
9448  suballocations1st[dstIndex] = suballocations1st[srcIndex];
9449  }
9450  ++srcIndex;
9451  }
9452  suballocations1st.resize(nonNullItemCount);
9453  m_1stNullItemsBeginCount = 0;
9454  m_1stNullItemsMiddleCount = 0;
9455  }
9456 
9457  // 2nd vector became empty.
9458  if(suballocations2nd.empty())
9459  {
9460  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9461  }
9462 
9463  // 1st vector became empty.
9464  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
9465  {
9466  suballocations1st.clear();
9467  m_1stNullItemsBeginCount = 0;
9468 
9469  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9470  {
9471  // Swap 1st with 2nd. Now 2nd is empty.
9472  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9473  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
9474  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
9475  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9476  {
9477  ++m_1stNullItemsBeginCount;
9478  --m_1stNullItemsMiddleCount;
9479  }
9480  m_2ndNullItemsCount = 0;
9481  m_1stVectorIndex ^= 1;
9482  }
9483  }
9484  }
9485 
9486  VMA_HEAVY_ASSERT(Validate());
9487 }
9488 
9489 
9490 ////////////////////////////////////////////////////////////////////////////////
9491 // class VmaBlockMetadata_Buddy
9492 
9493 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
9494  VmaBlockMetadata(hAllocator),
9495  m_Root(VMA_NULL),
9496  m_AllocationCount(0),
9497  m_FreeCount(1),
9498  m_SumFreeSize(0)
9499 {
9500  memset(m_FreeList, 0, sizeof(m_FreeList));
9501 }
9502 
9503 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9504 {
9505  DeleteNode(m_Root);
9506 }
9507 
9508 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9509 {
9510  VmaBlockMetadata::Init(size);
9511 
9512  m_UsableSize = VmaPrevPow2(size);
9513  m_SumFreeSize = m_UsableSize;
9514 
9515  // Calculate m_LevelCount.
9516  m_LevelCount = 1;
9517  while(m_LevelCount < MAX_LEVELS &&
9518  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
9519  {
9520  ++m_LevelCount;
9521  }
9522 
9523  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
9524  rootNode->offset = 0;
9525  rootNode->type = Node::TYPE_FREE;
9526  rootNode->parent = VMA_NULL;
9527  rootNode->buddy = VMA_NULL;
9528 
9529  m_Root = rootNode;
9530  AddToFreeListFront(0, rootNode);
9531 }
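// [Editor's note] A small sketch of the level geometry set up above, not part of
// the header. Node size halves per level until MIN_NODE_SIZE:
#if 0
// LevelToNodeSize(level) == m_UsableSize >> level. E.g. for a 256 MiB block:
// level 0: 256 MiB (root), level 1: 128 MiB, level 2: 64 MiB, ...
// For a non-power-of-2 block size, VmaPrevPow2() rounds down and the remainder
// is permanently unusable (reported via GetUnusableSize()).
VkDeviceSize nodeSize = m_UsableSize;
for(uint32_t level = 0; level < m_LevelCount; ++level, nodeSize >>= 1)
{
    printf("level %u: node size %llu\n", level, (unsigned long long)nodeSize);
}
#endif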
9532 
9533 bool VmaBlockMetadata_Buddy::Validate() const
9534 {
9535  // Validate tree.
9536  ValidationContext ctx;
9537  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9538  {
9539  VMA_VALIDATE(false && "ValidateNode failed.");
9540  }
9541  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9542  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9543 
9544  // Validate free node lists.
9545  for(uint32_t level = 0; level < m_LevelCount; ++level)
9546  {
9547  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9548  m_FreeList[level].front->free.prev == VMA_NULL);
9549 
9550  for(Node* node = m_FreeList[level].front;
9551  node != VMA_NULL;
9552  node = node->free.next)
9553  {
9554  VMA_VALIDATE(node->type == Node::TYPE_FREE);
9555 
9556  if(node->free.next == VMA_NULL)
9557  {
9558  VMA_VALIDATE(m_FreeList[level].back == node);
9559  }
9560  else
9561  {
9562  VMA_VALIDATE(node->free.next->free.prev == node);
9563  }
9564  }
9565  }
9566 
9567  // Validate that free lists at higher levels are empty.
9568  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9569  {
9570  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9571  }
9572 
9573  return true;
9574 }
9575 
9576 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9577 {
9578  for(uint32_t level = 0; level < m_LevelCount; ++level)
9579  {
9580  if(m_FreeList[level].front != VMA_NULL)
9581  {
9582  return LevelToNodeSize(level);
9583  }
9584  }
9585  return 0;
9586 }
9587 
9588 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9589 {
9590  const VkDeviceSize unusableSize = GetUnusableSize();
9591 
9592  outInfo.blockCount = 1;
9593 
9594  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9595  outInfo.usedBytes = outInfo.unusedBytes = 0;
9596 
9597  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9598  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9599  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9600 
9601  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
9602 
9603  if(unusableSize > 0)
9604  {
9605  ++outInfo.unusedRangeCount;
9606  outInfo.unusedBytes += unusableSize;
9607  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
9608  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
9609  }
9610 }
9611 
9612 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9613 {
9614  const VkDeviceSize unusableSize = GetUnusableSize();
9615 
9616  inoutStats.size += GetSize();
9617  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9618  inoutStats.allocationCount += m_AllocationCount;
9619  inoutStats.unusedRangeCount += m_FreeCount;
9620  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9621 
9622  if(unusableSize > 0)
9623  {
9624  ++inoutStats.unusedRangeCount;
9625  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9626  }
9627 }
9628 
9629 #if VMA_STATS_STRING_ENABLED
9630 
9631 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9632 {
9633  // TODO optimize
9634  VmaStatInfo stat;
9635  CalcAllocationStatInfo(stat);
9636 
9637  PrintDetailedMap_Begin(
9638  json,
9639  stat.unusedBytes,
9640  stat.allocationCount,
9641  stat.unusedRangeCount);
9642 
9643  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9644 
9645  const VkDeviceSize unusableSize = GetUnusableSize();
9646  if(unusableSize > 0)
9647  {
9648  PrintDetailedMap_UnusedRange(json,
9649  m_UsableSize, // offset
9650  unusableSize); // size
9651  }
9652 
9653  PrintDetailedMap_End(json);
9654 }
9655 
9656 #endif // #if VMA_STATS_STRING_ENABLED
9657 
9658 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9659  uint32_t currentFrameIndex,
9660  uint32_t frameInUseCount,
9661  VkDeviceSize bufferImageGranularity,
9662  VkDeviceSize allocSize,
9663  VkDeviceSize allocAlignment,
9664  bool upperAddress,
9665  VmaSuballocationType allocType,
9666  bool canMakeOtherLost,
9667  uint32_t strategy,
9668  VmaAllocationRequest* pAllocationRequest)
9669 {
9670  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9671 
9672  // Simple way to respect bufferImageGranularity. May be optimized some day.
9673  // Whenever the allocation might contain an OPTIMAL image, round both alignment and size up to bufferImageGranularity.
9674  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9675  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9676  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9677  {
9678  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9679  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9680  }
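 // Illustrative values (hypothetical, assuming bufferImageGranularity = 1024): a request with
 // size = 600 and alignment = 256 is padded to size = 1024 and alignment = 1024, so the
 // resulting node cannot share a granularity page with a differently-typed neighbor.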
9681 
9682  if(allocSize > m_UsableSize)
9683  {
9684  return false;
9685  }
9686 
9687  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9688  for(uint32_t level = targetLevel + 1; level--; )
9689  {
9690  for(Node* freeNode = m_FreeList[level].front;
9691  freeNode != VMA_NULL;
9692  freeNode = freeNode->free.next)
9693  {
9694  if(freeNode->offset % allocAlignment == 0)
9695  {
9696  pAllocationRequest->offset = freeNode->offset;
9697  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9698  pAllocationRequest->sumItemSize = 0;
9699  pAllocationRequest->itemsToMakeLostCount = 0;
9700  pAllocationRequest->customData = (void*)(uintptr_t)level;
9701  return true;
9702  }
9703  }
9704  }
9705 
9706  return false;
9707 }
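 // Worked example of the search above (a sketch; the numbers are hypothetical): with
 // m_UsableSize = 256 and allocSize = 50, AllocSizeToLevel returns the level whose node
 // size is 64. The loop header "level = targetLevel + 1; level--" then visits targetLevel,
 // targetLevel - 1, ..., 0, i.e. node sizes 64, 128, 256 in that order, and takes the first
 // free node whose offset satisfies allocAlignment; Alloc() later splits a larger node down.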
9708 
9709 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9710  uint32_t currentFrameIndex,
9711  uint32_t frameInUseCount,
9712  VmaAllocationRequest* pAllocationRequest)
9713 {
9714  /*
9715  Lost allocations are not supported in the buddy allocator at the moment.
9716  Support might be added in the future.
9717  */
9718  return pAllocationRequest->itemsToMakeLostCount == 0;
9719 }
9720 
9721 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9722 {
9723  /*
9724  Lost allocations are not supported in the buddy allocator at the moment.
9725  Support might be added in the future.
9726  */
9727  return 0;
9728 }
9729 
9730 void VmaBlockMetadata_Buddy::Alloc(
9731  const VmaAllocationRequest& request,
9732  VmaSuballocationType type,
9733  VkDeviceSize allocSize,
9734  bool upperAddress,
9735  VmaAllocation hAllocation)
9736 {
9737  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9738  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9739 
9740  Node* currNode = m_FreeList[currLevel].front;
9741  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9742  while(currNode->offset != request.offset)
9743  {
9744  currNode = currNode->free.next;
9745  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9746  }
9747 
9748  // Go down, splitting free nodes.
9749  while(currLevel < targetLevel)
9750  {
9751  // currNode is already the first free node at currLevel.
9752  // Remove it from the list of free nodes at this level.
9753  RemoveFromFreeList(currLevel, currNode);
9754 
9755  const uint32_t childrenLevel = currLevel + 1;
9756 
9757  // Create two free sub-nodes.
9758  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
9759  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
9760 
9761  leftChild->offset = currNode->offset;
9762  leftChild->type = Node::TYPE_FREE;
9763  leftChild->parent = currNode;
9764  leftChild->buddy = rightChild;
9765 
9766  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9767  rightChild->type = Node::TYPE_FREE;
9768  rightChild->parent = currNode;
9769  rightChild->buddy = leftChild;
9770 
9771  // Convert currNode to split type.
9772  currNode->type = Node::TYPE_SPLIT;
9773  currNode->split.leftChild = leftChild;
9774 
9775  // Add child nodes to free list. Order is important: the left child must land at the front so the next loop iteration continues at the same, already matched offset.
9776  AddToFreeListFront(childrenLevel, rightChild);
9777  AddToFreeListFront(childrenLevel, leftChild);
9778 
9779  ++m_FreeCount;
9780  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
9781  ++currLevel;
9782  currNode = m_FreeList[currLevel].front;
9783 
9784  /*
9785  We can be sure that currNode, as the left child of the node previously split,
9786  also fulfills the alignment requirement.
9787  */
9788  }
9789 
9790  // Remove from free list.
9791  VMA_ASSERT(currLevel == targetLevel &&
9792  currNode != VMA_NULL &&
9793  currNode->type == Node::TYPE_FREE);
9794  RemoveFromFreeList(currLevel, currNode);
9795 
9796  // Convert to allocation node.
9797  currNode->type = Node::TYPE_ALLOCATION;
9798  currNode->allocation.alloc = hAllocation;
9799 
9800  ++m_AllocationCount;
9801  --m_FreeCount;
9802  m_SumFreeSize -= allocSize;
9803 }
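 // Example of the splitting loop, assuming a 256-byte block that is entirely free and a
 // request with allocSize = 50 (target node size 64): the free 256 node is split into
 // 128 + 128, its left child into 64 + 64, and the final left 64 node at offset 0 becomes
 // the allocation; the free buddies left behind are one 128 node and one 64 node.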
9804 
9805 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9806 {
9807  if(node->type == Node::TYPE_SPLIT)
9808  {
9809  DeleteNode(node->split.leftChild->buddy);
9810  DeleteNode(node->split.leftChild);
9811  }
9812 
9813  vma_delete(GetAllocationCallbacks(), node);
9814 }
9815 
9816 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9817 {
9818  VMA_VALIDATE(level < m_LevelCount);
9819  VMA_VALIDATE(curr->parent == parent);
9820  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9821  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9822  switch(curr->type)
9823  {
9824  case Node::TYPE_FREE:
9825  // curr->free.prev, next are validated separately.
9826  ctx.calculatedSumFreeSize += levelNodeSize;
9827  ++ctx.calculatedFreeCount;
9828  break;
9829  case Node::TYPE_ALLOCATION:
9830  ++ctx.calculatedAllocationCount;
9831  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE); // Validate the handle before dereferencing it below.
9832  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9833  break;
9834  case Node::TYPE_SPLIT:
9835  {
9836  const uint32_t childrenLevel = level + 1;
9837  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9838  const Node* const leftChild = curr->split.leftChild;
9839  VMA_VALIDATE(leftChild != VMA_NULL);
9840  VMA_VALIDATE(leftChild->offset == curr->offset);
9841  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9842  {
9843  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9844  }
9845  const Node* const rightChild = leftChild->buddy;
9846  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9847  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9848  {
9849  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9850  }
9851  }
9852  break;
9853  default:
9854  return false;
9855  }
9856 
9857  return true;
9858 }
9859 
9860 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9861 {
9862  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
9863  uint32_t level = 0;
9864  VkDeviceSize currLevelNodeSize = m_UsableSize;
9865  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9866  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9867  {
9868  ++level;
9869  currLevelNodeSize = nextLevelNodeSize;
9870  nextLevelNodeSize = currLevelNodeSize >> 1;
9871  }
9872  return level;
9873 }
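 // E.g. for m_UsableSize = 256: AllocSizeToLevel(256) = 0, AllocSizeToLevel(65) = 1
 // (node size 128), AllocSizeToLevel(64) = 2 (node size 64), and AllocSizeToLevel(1) =
 // m_LevelCount - 1. The loop is roughly the portable equivalent of a bit scan on allocSize.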
9874 
9875 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9876 {
9877  // Find node and level.
9878  Node* node = m_Root;
9879  VkDeviceSize nodeOffset = 0;
9880  uint32_t level = 0;
9881  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9882  while(node->type == Node::TYPE_SPLIT)
9883  {
9884  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9885  if(offset < nodeOffset + nextLevelSize)
9886  {
9887  node = node->split.leftChild;
9888  }
9889  else
9890  {
9891  node = node->split.leftChild->buddy;
9892  nodeOffset += nextLevelSize;
9893  }
9894  ++level;
9895  levelNodeSize = nextLevelSize;
9896  }
9897 
9898  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9899  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9900 
9901  ++m_FreeCount;
9902  --m_AllocationCount;
9903  m_SumFreeSize += alloc->GetSize();
9904 
9905  node->type = Node::TYPE_FREE;
9906 
9907  // Join free nodes if possible.
9908  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9909  {
9910  RemoveFromFreeList(level, node->buddy);
9911  Node* const parent = node->parent;
9912 
9913  vma_delete(GetAllocationCallbacks(), node->buddy);
9914  vma_delete(GetAllocationCallbacks(), node);
9915  parent->type = Node::TYPE_FREE;
9916 
9917  node = parent;
9918  --level;
9919  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9920  --m_FreeCount;
9921  }
9922 
9923  AddToFreeListFront(level, node);
9924 }
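 // Example of the merge loop above: freeing a 64 node whose buddy at the same level is
 // already free removes that buddy from its free list, deletes both children, and marks
 // the 128-byte parent free; if the parent's buddy is also free the merge cascades upward,
 // so a fully freed block eventually collapses back into the single root node.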
9925 
9926 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9927 {
9928  switch(node->type)
9929  {
9930  case Node::TYPE_FREE:
9931  ++outInfo.unusedRangeCount;
9932  outInfo.unusedBytes += levelNodeSize;
9933  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9934  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
9935  break;
9936  case Node::TYPE_ALLOCATION:
9937  {
9938  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9939  ++outInfo.allocationCount;
9940  outInfo.usedBytes += allocSize;
9941  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
9942  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
9943 
9944  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9945  if(unusedRangeSize > 0)
9946  {
9947  ++outInfo.unusedRangeCount;
9948  outInfo.unusedBytes += unusedRangeSize;
9949  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9950  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9951  }
9952  }
9953  break;
9954  case Node::TYPE_SPLIT:
9955  {
9956  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9957  const Node* const leftChild = node->split.leftChild;
9958  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9959  const Node* const rightChild = leftChild->buddy;
9960  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9961  }
9962  break;
9963  default:
9964  VMA_ASSERT(0);
9965  }
9966 }
9967 
9968 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9969 {
9970  VMA_ASSERT(node->type == Node::TYPE_FREE);
9971 
9972  // List is empty.
9973  Node* const frontNode = m_FreeList[level].front;
9974  if(frontNode == VMA_NULL)
9975  {
9976  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9977  node->free.prev = node->free.next = VMA_NULL;
9978  m_FreeList[level].front = m_FreeList[level].back = node;
9979  }
9980  else
9981  {
9982  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9983  node->free.prev = VMA_NULL;
9984  node->free.next = frontNode;
9985  frontNode->free.prev = node;
9986  m_FreeList[level].front = node;
9987  }
9988 }
9989 
9990 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9991 {
9992  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9993 
9994  // It is at the front.
9995  if(node->free.prev == VMA_NULL)
9996  {
9997  VMA_ASSERT(m_FreeList[level].front == node);
9998  m_FreeList[level].front = node->free.next;
9999  }
10000  else
10001  {
10002  Node* const prevFreeNode = node->free.prev;
10003  VMA_ASSERT(prevFreeNode->free.next == node);
10004  prevFreeNode->free.next = node->free.next;
10005  }
10006 
10007  // It is at the back.
10008  if(node->free.next == VMA_NULL)
10009  {
10010  VMA_ASSERT(m_FreeList[level].back == node);
10011  m_FreeList[level].back = node->free.prev;
10012  }
10013  else
10014  {
10015  Node* const nextFreeNode = node->free.next;
10016  VMA_ASSERT(nextFreeNode->free.prev == node);
10017  nextFreeNode->free.prev = node->free.prev;
10018  }
10019 }
10020 
10021 #if VMA_STATS_STRING_ENABLED
10022 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10023 {
10024  switch(node->type)
10025  {
10026  case Node::TYPE_FREE:
10027  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10028  break;
10029  case Node::TYPE_ALLOCATION:
10030  {
10031  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10032  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10033  if(allocSize < levelNodeSize)
10034  {
10035  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10036  }
10037  }
10038  break;
10039  case Node::TYPE_SPLIT:
10040  {
10041  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10042  const Node* const leftChild = node->split.leftChild;
10043  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10044  const Node* const rightChild = leftChild->buddy;
10045  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10046  }
10047  break;
10048  default:
10049  VMA_ASSERT(0);
10050  }
10051 }
10052 #endif // #if VMA_STATS_STRING_ENABLED
10053 
10054 
10055 ////////////////////////////////////////////////////////////////////////////////
10056 // class VmaDeviceMemoryBlock
10057 
10058 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10059  m_pMetadata(VMA_NULL),
10060  m_MemoryTypeIndex(UINT32_MAX),
10061  m_Id(0),
10062  m_hMemory(VK_NULL_HANDLE),
10063  m_MapCount(0),
10064  m_pMappedData(VMA_NULL)
10065 {
10066 }
10067 
10068 void VmaDeviceMemoryBlock::Init(
10069  VmaAllocator hAllocator,
10070  uint32_t newMemoryTypeIndex,
10071  VkDeviceMemory newMemory,
10072  VkDeviceSize newSize,
10073  uint32_t id,
10074  uint32_t algorithm)
10075 {
10076  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10077 
10078  m_MemoryTypeIndex = newMemoryTypeIndex;
10079  m_Id = id;
10080  m_hMemory = newMemory;
10081 
10082  switch(algorithm)
10083  {
10084  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10085  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10086  break;
10087  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10088  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10089  break;
10090  default:
10091  VMA_ASSERT(0);
10092  // Fall-through.
10093  case 0:
10094  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10095  }
10096  m_pMetadata->Init(newSize);
10097 }
10098 
10099 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10100 {
10101  // This is the most important assert in the entire library.
10102  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10103  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10104 
10105  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10106  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10107  m_hMemory = VK_NULL_HANDLE;
10108 
10109  vma_delete(allocator, m_pMetadata);
10110  m_pMetadata = VMA_NULL;
10111 }
10112 
10113 bool VmaDeviceMemoryBlock::Validate() const
10114 {
10115  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
10116  (m_pMetadata->GetSize() != 0));
10117 
10118  return m_pMetadata->Validate();
10119 }
10120 
10121 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
10122 {
10123  void* pData = VMA_NULL;
10124  VkResult res = Map(hAllocator, 1, &pData);
10125  if(res != VK_SUCCESS)
10126  {
10127  return res;
10128  }
10129 
10130  res = m_pMetadata->CheckCorruption(pData);
10131 
10132  Unmap(hAllocator, 1);
10133 
10134  return res;
10135 }
10136 
10137 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10138 {
10139  if(count == 0)
10140  {
10141  return VK_SUCCESS;
10142  }
10143 
10144  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10145  if(m_MapCount != 0)
10146  {
10147  m_MapCount += count;
10148  VMA_ASSERT(m_pMappedData != VMA_NULL);
10149  if(ppData != VMA_NULL)
10150  {
10151  *ppData = m_pMappedData;
10152  }
10153  return VK_SUCCESS;
10154  }
10155  else
10156  {
10157  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10158  hAllocator->m_hDevice,
10159  m_hMemory,
10160  0, // offset
10161  VK_WHOLE_SIZE,
10162  0, // flags
10163  &m_pMappedData);
10164  if(result == VK_SUCCESS)
10165  {
10166  if(ppData != VMA_NULL)
10167  {
10168  *ppData = m_pMappedData;
10169  }
10170  m_MapCount = count;
10171  }
10172  return result;
10173  }
10174 }
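 // Usage sketch of the reference-counted mapping (hypothetical calls, outside the library):
 //   void* p1; void* p2;
 //   block->Map(hAllocator, 1, &p1);   // first user: vkMapMemory is called, m_MapCount = 1
 //   block->Map(hAllocator, 1, &p2);   // nested user: no Vulkan call, p2 == p1, m_MapCount = 2
 //   block->Unmap(hAllocator, 1);      // m_MapCount = 1, memory stays mapped
 //   block->Unmap(hAllocator, 1);      // last user: vkUnmapMemory is called, m_MapCount = 0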
10175 
10176 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10177 {
10178  if(count == 0)
10179  {
10180  return;
10181  }
10182 
10183  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10184  if(m_MapCount >= count)
10185  {
10186  m_MapCount -= count;
10187  if(m_MapCount == 0)
10188  {
10189  m_pMappedData = VMA_NULL;
10190  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10191  }
10192  }
10193  else
10194  {
10195  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10196  }
10197 }
10198 
10199 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10200 {
10201  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10202  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10203 
10204  void* pData;
10205  VkResult res = Map(hAllocator, 1, &pData);
10206  if(res != VK_SUCCESS)
10207  {
10208  return res;
10209  }
10210 
10211  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10212  VmaWriteMagicValue(pData, allocOffset + allocSize);
10213 
10214  Unmap(hAllocator, 1);
10215 
10216  return VK_SUCCESS;
10217 }
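 // Resulting layout, assuming VMA_DEBUG_MARGIN = 16 and corruption detection enabled:
 //
 //   ...[16-byte margin][ allocation: allocSize bytes ][16-byte margin]...
 //       ^ filled with a repeating 32-bit magic         ^ filled with the same magic
 //         pattern at allocOffset - VMA_DEBUG_MARGIN      pattern at allocOffset + allocSize
 //
 // ValidateMagicValueAroundAllocation() below re-reads both margins to detect writes that
 // underflowed or overflowed the allocation.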
10218 
10219 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10220 {
10221  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10222  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10223 
10224  void* pData;
10225  VkResult res = Map(hAllocator, 1, &pData);
10226  if(res != VK_SUCCESS)
10227  {
10228  return res;
10229  }
10230 
10231  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10232  {
10233  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10234  }
10235  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10236  {
10237  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10238  }
10239 
10240  Unmap(hAllocator, 1);
10241 
10242  return VK_SUCCESS;
10243 }
10244 
10245 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10246  const VmaAllocator hAllocator,
10247  const VmaAllocation hAllocation,
10248  VkBuffer hBuffer)
10249 {
10250  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10251  hAllocation->GetBlock() == this);
10252  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10253  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10254  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10255  hAllocator->m_hDevice,
10256  hBuffer,
10257  m_hMemory,
10258  hAllocation->GetOffset());
10259 }
10260 
10261 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10262  const VmaAllocator hAllocator,
10263  const VmaAllocation hAllocation,
10264  VkImage hImage)
10265 {
10266  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10267  hAllocation->GetBlock() == this);
10268  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10269  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10270  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10271  hAllocator->m_hDevice,
10272  hImage,
10273  m_hMemory,
10274  hAllocation->GetOffset());
10275 }
10276 
10277 static void InitStatInfo(VmaStatInfo& outInfo)
10278 {
10279  memset(&outInfo, 0, sizeof(outInfo));
10280  outInfo.allocationSizeMin = UINT64_MAX;
10281  outInfo.unusedRangeSizeMin = UINT64_MAX;
10282 }
10283 
10284 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10285 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10286 {
10287  inoutInfo.blockCount += srcInfo.blockCount;
10288  inoutInfo.allocationCount += srcInfo.allocationCount;
10289  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10290  inoutInfo.usedBytes += srcInfo.usedBytes;
10291  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10292  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10293  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10294  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10295  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10296 }
10297 
10298 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10299 {
10300  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10301  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10302  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10303  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10304 }
10305 
10306 VmaPool_T::VmaPool_T(
10307  VmaAllocator hAllocator,
10308  const VmaPoolCreateInfo& createInfo,
10309  VkDeviceSize preferredBlockSize) :
10310  m_BlockVector(
10311  hAllocator,
10312  createInfo.memoryTypeIndex,
10313  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
10314  createInfo.minBlockCount,
10315  createInfo.maxBlockCount,
10316  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
10317  createInfo.frameInUseCount,
10318  true, // isCustomPool
10319  createInfo.blockSize != 0, // explicitBlockSize
10320  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
10321  m_Id(0)
10322 {
10323 }
10324 
10325 VmaPool_T::~VmaPool_T()
10326 {
10327 }
10328 
10329 #if VMA_STATS_STRING_ENABLED
10330 
10331 #endif // #if VMA_STATS_STRING_ENABLED
10332 
10333 VmaBlockVector::VmaBlockVector(
10334  VmaAllocator hAllocator,
10335  uint32_t memoryTypeIndex,
10336  VkDeviceSize preferredBlockSize,
10337  size_t minBlockCount,
10338  size_t maxBlockCount,
10339  VkDeviceSize bufferImageGranularity,
10340  uint32_t frameInUseCount,
10341  bool isCustomPool,
10342  bool explicitBlockSize,
10343  uint32_t algorithm) :
10344  m_hAllocator(hAllocator),
10345  m_MemoryTypeIndex(memoryTypeIndex),
10346  m_PreferredBlockSize(preferredBlockSize),
10347  m_MinBlockCount(minBlockCount),
10348  m_MaxBlockCount(maxBlockCount),
10349  m_BufferImageGranularity(bufferImageGranularity),
10350  m_FrameInUseCount(frameInUseCount),
10351  m_IsCustomPool(isCustomPool),
10352  m_ExplicitBlockSize(explicitBlockSize),
10353  m_Algorithm(algorithm),
10354  m_HasEmptyBlock(false),
10355  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
10356  m_pDefragmentator(VMA_NULL),
10357  m_NextBlockId(0)
10358 {
10359 }
10360 
10361 VmaBlockVector::~VmaBlockVector()
10362 {
10363  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10364 
10365  for(size_t i = m_Blocks.size(); i--; )
10366  {
10367  m_Blocks[i]->Destroy(m_hAllocator);
10368  vma_delete(m_hAllocator, m_Blocks[i]);
10369  }
10370 }
10371 
10372 VkResult VmaBlockVector::CreateMinBlocks()
10373 {
10374  for(size_t i = 0; i < m_MinBlockCount; ++i)
10375  {
10376  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10377  if(res != VK_SUCCESS)
10378  {
10379  return res;
10380  }
10381  }
10382  return VK_SUCCESS;
10383 }
10384 
10385 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10386 {
10387  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10388 
10389  const size_t blockCount = m_Blocks.size();
10390 
10391  pStats->size = 0;
10392  pStats->unusedSize = 0;
10393  pStats->allocationCount = 0;
10394  pStats->unusedRangeCount = 0;
10395  pStats->unusedRangeSizeMax = 0;
10396  pStats->blockCount = blockCount;
10397 
10398  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10399  {
10400  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10401  VMA_ASSERT(pBlock);
10402  VMA_HEAVY_ASSERT(pBlock->Validate());
10403  pBlock->m_pMetadata->AddPoolStats(*pStats);
10404  }
10405 }
10406 
10407 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10408 {
10409  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10410  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10411  (VMA_DEBUG_MARGIN > 0) &&
10412  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10413 }
10414 
10415 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10416 
10417 VkResult VmaBlockVector::Allocate(
10418  VmaPool hCurrentPool,
10419  uint32_t currentFrameIndex,
10420  VkDeviceSize size,
10421  VkDeviceSize alignment,
10422  const VmaAllocationCreateInfo& createInfo,
10423  VmaSuballocationType suballocType,
10424  VmaAllocation* pAllocation)
10425 {
10426  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10427  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10428  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10429  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10430  const bool canCreateNewBlock =
10431  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10432  (m_Blocks.size() < m_MaxBlockCount);
10433  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10434 
10435  // If linear algorithm is used, canMakeOtherLost is available only when the pool is used as a ring buffer,
10436  // which in turn is available only when maxBlockCount = 1.
10437  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10438  {
10439  canMakeOtherLost = false;
10440  }
10441 
10442  // Upper address can only be used with linear allocator and within single memory block.
10443  if(isUpperAddress &&
10444  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10445  {
10446  return VK_ERROR_FEATURE_NOT_PRESENT;
10447  }
10448 
10449  // Validate strategy.
10450  switch(strategy)
10451  {
10452  case 0:
10453  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
10454  break;
10455  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
10456  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
10457  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
10458  break;
10459  default:
10460  return VK_ERROR_FEATURE_NOT_PRESENT;
10461  }
10462 
10463  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
10464  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10465  {
10466  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10467  }
10468 
10469  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10470 
10471  /*
10472  Under certain conditions, this whole section can be skipped for optimization, so
10473  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10474  e.g. for custom pools with linear algorithm.
10475  */
10476  if(!canMakeOtherLost || canCreateNewBlock)
10477  {
10478  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10479  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10480  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
10481 
10482  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10483  {
10484  // Use only last block.
10485  if(!m_Blocks.empty())
10486  {
10487  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10488  VMA_ASSERT(pCurrBlock);
10489  VkResult res = AllocateFromBlock(
10490  pCurrBlock,
10491  hCurrentPool,
10492  currentFrameIndex,
10493  size,
10494  alignment,
10495  allocFlagsCopy,
10496  createInfo.pUserData,
10497  suballocType,
10498  strategy,
10499  pAllocation);
10500  if(res == VK_SUCCESS)
10501  {
10502  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10503  return VK_SUCCESS;
10504  }
10505  }
10506  }
10507  else
10508  {
10509  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10510  {
10511  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10512  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10513  {
10514  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10515  VMA_ASSERT(pCurrBlock);
10516  VkResult res = AllocateFromBlock(
10517  pCurrBlock,
10518  hCurrentPool,
10519  currentFrameIndex,
10520  size,
10521  alignment,
10522  allocFlagsCopy,
10523  createInfo.pUserData,
10524  suballocType,
10525  strategy,
10526  pAllocation);
10527  if(res == VK_SUCCESS)
10528  {
10529  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10530  return VK_SUCCESS;
10531  }
10532  }
10533  }
10534  else // WORST_FIT, FIRST_FIT
10535  {
10536  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10537  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10538  {
10539  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10540  VMA_ASSERT(pCurrBlock);
10541  VkResult res = AllocateFromBlock(
10542  pCurrBlock,
10543  hCurrentPool,
10544  currentFrameIndex,
10545  size,
10546  alignment,
10547  allocFlagsCopy,
10548  createInfo.pUserData,
10549  suballocType,
10550  strategy,
10551  pAllocation);
10552  if(res == VK_SUCCESS)
10553  {
10554  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10555  return VK_SUCCESS;
10556  }
10557  }
10558  }
10559  }
10560 
10561  // 2. Try to create new block.
10562  if(canCreateNewBlock)
10563  {
10564  // Calculate optimal size for new block.
10565  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10566  uint32_t newBlockSizeShift = 0;
10567  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10568 
10569  if(!m_ExplicitBlockSize)
10570  {
10571  // Allocate 1/8, 1/4, 1/2 as first blocks.
10572  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10573  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10574  {
10575  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10576  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10577  {
10578  newBlockSize = smallerNewBlockSize;
10579  ++newBlockSizeShift;
10580  }
10581  else
10582  {
10583  break;
10584  }
10585  }
10586  }
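 // Example of the heuristic above (hypothetical numbers), assuming m_PreferredBlockSize =
 // 256 MiB, no existing blocks (maxExistingBlockSize = 0), and size = 10 MiB: the loop
 // accepts 128, 64 and then 32 MiB (each > 0 and >= 20 MiB), so the first block is created
 // at 32 MiB with newBlockSizeShift = 3. As larger blocks appear, maxExistingBlockSize
 // grows and newly created blocks ramp back up toward the full 256 MiB.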
10587 
10588  size_t newBlockIndex = 0;
10589  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10590  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10591  if(!m_ExplicitBlockSize)
10592  {
10593  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10594  {
10595  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10596  if(smallerNewBlockSize >= size)
10597  {
10598  newBlockSize = smallerNewBlockSize;
10599  ++newBlockSizeShift;
10600  res = CreateBlock(newBlockSize, &newBlockIndex);
10601  }
10602  else
10603  {
10604  break;
10605  }
10606  }
10607  }
10608 
10609  if(res == VK_SUCCESS)
10610  {
10611  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10612  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10613 
10614  res = AllocateFromBlock(
10615  pBlock,
10616  hCurrentPool,
10617  currentFrameIndex,
10618  size,
10619  alignment,
10620  allocFlagsCopy,
10621  createInfo.pUserData,
10622  suballocType,
10623  strategy,
10624  pAllocation);
10625  if(res == VK_SUCCESS)
10626  {
10627  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10628  return VK_SUCCESS;
10629  }
10630  else
10631  {
10632  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10633  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10634  }
10635  }
10636  }
10637  }
10638 
10639  // 3. Try to allocate from existing blocks with making other allocations lost.
10640  if(canMakeOtherLost)
10641  {
10642  uint32_t tryIndex = 0;
10643  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10644  {
10645  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10646  VmaAllocationRequest bestRequest = {};
10647  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10648 
10649  // 1. Search existing allocations.
10650  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10651  {
10652  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10653  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10654  {
10655  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10656  VMA_ASSERT(pCurrBlock);
10657  VmaAllocationRequest currRequest = {};
10658  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10659  currentFrameIndex,
10660  m_FrameInUseCount,
10661  m_BufferImageGranularity,
10662  size,
10663  alignment,
10664  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10665  suballocType,
10666  canMakeOtherLost,
10667  strategy,
10668  &currRequest))
10669  {
10670  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10671  if(pBestRequestBlock == VMA_NULL ||
10672  currRequestCost < bestRequestCost)
10673  {
10674  pBestRequestBlock = pCurrBlock;
10675  bestRequest = currRequest;
10676  bestRequestCost = currRequestCost;
10677 
10678  if(bestRequestCost == 0)
10679  {
10680  break;
10681  }
10682  }
10683  }
10684  }
10685  }
10686  else // WORST_FIT, FIRST_FIT
10687  {
10688  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10689  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10690  {
10691  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10692  VMA_ASSERT(pCurrBlock);
10693  VmaAllocationRequest currRequest = {};
10694  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10695  currentFrameIndex,
10696  m_FrameInUseCount,
10697  m_BufferImageGranularity,
10698  size,
10699  alignment,
10700  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10701  suballocType,
10702  canMakeOtherLost,
10703  strategy,
10704  &currRequest))
10705  {
10706  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10707  if(pBestRequestBlock == VMA_NULL ||
10708  currRequestCost < bestRequestCost ||
10709  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10710  {
10711  pBestRequestBlock = pCurrBlock;
10712  bestRequest = currRequest;
10713  bestRequestCost = currRequestCost;
10714 
10715  if(bestRequestCost == 0 ||
10716  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10717  {
10718  break;
10719  }
10720  }
10721  }
10722  }
10723  }
10724 
10725  if(pBestRequestBlock != VMA_NULL)
10726  {
10727  if(mapped)
10728  {
10729  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10730  if(res != VK_SUCCESS)
10731  {
10732  return res;
10733  }
10734  }
10735 
10736  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10737  currentFrameIndex,
10738  m_FrameInUseCount,
10739  &bestRequest))
10740  {
10741  // We no longer have an empty block.
10742  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10743  {
10744  m_HasEmptyBlock = false;
10745  }
10746  // Allocate from this pBlock.
10747  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10748  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10749  (*pAllocation)->InitBlockAllocation(
10750  hCurrentPool,
10751  pBestRequestBlock,
10752  bestRequest.offset,
10753  alignment,
10754  size,
10755  suballocType,
10756  mapped,
10757  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10758  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10759  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
10760  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10761  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10762  {
10763  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10764  }
10765  if(IsCorruptionDetectionEnabled())
10766  {
10767  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10768  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10769  }
10770  return VK_SUCCESS;
10771  }
10772  // else: Some allocations must have been touched while we are here. Next try.
10773  }
10774  else
10775  {
10776  // Could not find place in any of the blocks - break outer loop.
10777  break;
10778  }
10779  }
10780  /* Maximum number of tries exceeded - a very unlikely event that can happen when many
10781  other threads are simultaneously touching allocations, making it impossible to make them
10782  lost at the same time as we try to allocate. */
10783  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10784  {
10785  return VK_ERROR_TOO_MANY_OBJECTS;
10786  }
10787  }
10788 
10789  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10790 }
10791 
10792 void VmaBlockVector::Free(
10793  VmaAllocation hAllocation)
10794 {
10795  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10796 
10797  // Scope for lock.
10798  {
10799  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10800 
10801  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10802 
10803  if(IsCorruptionDetectionEnabled())
10804  {
10805  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10806  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10807  }
10808 
10809  if(hAllocation->IsPersistentMap())
10810  {
10811  pBlock->Unmap(m_hAllocator, 1);
10812  }
10813 
10814  pBlock->m_pMetadata->Free(hAllocation);
10815  VMA_HEAVY_ASSERT(pBlock->Validate());
10816 
10817  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
10818 
10819  // pBlock became empty after this deallocation.
10820  if(pBlock->m_pMetadata->IsEmpty())
10821  {
10822  // Vector already has an empty block. We don't want to have two, so delete this one.
10823  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10824  {
10825  pBlockToDelete = pBlock;
10826  Remove(pBlock);
10827  }
10828  // We now have our first empty block.
10829  else
10830  {
10831  m_HasEmptyBlock = true;
10832  }
10833  }
10834  // pBlock didn't become empty, but we have another empty block - find and free that one.
10835  // (This is optional, heuristics.)
10836  else if(m_HasEmptyBlock)
10837  {
10838  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10839  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10840  {
10841  pBlockToDelete = pLastBlock;
10842  m_Blocks.pop_back();
10843  m_HasEmptyBlock = false;
10844  }
10845  }
10846 
10847  IncrementallySortBlocks();
10848  }
10849 
10850  // Destruction of a free block. Deferred until this point, outside of mutex
10851  // lock, for performance reasons.
10852  if(pBlockToDelete != VMA_NULL)
10853  {
10854  VMA_DEBUG_LOG(" Deleted empty allocation");
10855  pBlockToDelete->Destroy(m_hAllocator);
10856  vma_delete(m_hAllocator, pBlockToDelete);
10857  }
10858 }
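 // Net effect of the heuristic above: the vector keeps at most one empty block alive as a
 // cache for the next allocation (as long as m_MinBlockCount allows), and any second empty
 // block is destroyed outside the mutex to keep the critical section short.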
10859 
10860 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10861 {
10862  VkDeviceSize result = 0;
10863  for(size_t i = m_Blocks.size(); i--; )
10864  {
10865  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10866  if(result >= m_PreferredBlockSize)
10867  {
10868  break;
10869  }
10870  }
10871  return result;
10872 }
10873 
10874 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10875 {
10876  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10877  {
10878  if(m_Blocks[blockIndex] == pBlock)
10879  {
10880  VmaVectorRemove(m_Blocks, blockIndex);
10881  return;
10882  }
10883  }
10884  VMA_ASSERT(0);
10885 }
10886 
10887 void VmaBlockVector::IncrementallySortBlocks()
10888 {
10889  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10890  {
10891  // Bubble sort only until first swap.
10892  for(size_t i = 1; i < m_Blocks.size(); ++i)
10893  {
10894  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10895  {
10896  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10897  return;
10898  }
10899  }
10900  }
10901 }
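 // A sketch of the incremental ordering: at most one swap per call, so e.g. free sizes
 // [40, 10, 30] become [10, 40, 30] after one call and [10, 30, 40] after the next.
 // Over repeated Free() calls the vector converges to ascending free-space order, which
 // keeps the best-fit forward scans cheap without paying for a full sort on every free.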
10902 
10903 VkResult VmaBlockVector::AllocateFromBlock(
10904  VmaDeviceMemoryBlock* pBlock,
10905  VmaPool hCurrentPool,
10906  uint32_t currentFrameIndex,
10907  VkDeviceSize size,
10908  VkDeviceSize alignment,
10909  VmaAllocationCreateFlags allocFlags,
10910  void* pUserData,
10911  VmaSuballocationType suballocType,
10912  uint32_t strategy,
10913  VmaAllocation* pAllocation)
10914 {
10915  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
10916  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10917  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10918  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10919 
10920  VmaAllocationRequest currRequest = {};
10921  if(pBlock->m_pMetadata->CreateAllocationRequest(
10922  currentFrameIndex,
10923  m_FrameInUseCount,
10924  m_BufferImageGranularity,
10925  size,
10926  alignment,
10927  isUpperAddress,
10928  suballocType,
10929  false, // canMakeOtherLost
10930  strategy,
10931  &currRequest))
10932  {
10933  // Allocate from pBlock.
10934  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
10935 
10936  if(mapped)
10937  {
10938  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
10939  if(res != VK_SUCCESS)
10940  {
10941  return res;
10942  }
10943  }
10944 
10945  // We no longer have an empty block.
10946  if(pBlock->m_pMetadata->IsEmpty())
10947  {
10948  m_HasEmptyBlock = false;
10949  }
10950 
10951  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10952  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
10953  (*pAllocation)->InitBlockAllocation(
10954  hCurrentPool,
10955  pBlock,
10956  currRequest.offset,
10957  alignment,
10958  size,
10959  suballocType,
10960  mapped,
10961  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10962  VMA_HEAVY_ASSERT(pBlock->Validate());
10963  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
10964  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10965  {
10966  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10967  }
10968  if(IsCorruptionDetectionEnabled())
10969  {
10970  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
10971  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10972  }
10973  return VK_SUCCESS;
10974  }
10975  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10976 }
10977 
10978 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10979 {
10980  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10981  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10982  allocInfo.allocationSize = blockSize;
10983  VkDeviceMemory mem = VK_NULL_HANDLE;
10984  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10985  if(res < 0)
10986  {
10987  return res;
10988  }
10989 
10990  // New VkDeviceMemory successfully created.
10991 
10992  // Create new block object for it.
10993  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10994  pBlock->Init(
10995  m_hAllocator,
10996  m_MemoryTypeIndex,
10997  mem,
10998  allocInfo.allocationSize,
10999  m_NextBlockId++,
11000  m_Algorithm);
11001 
11002  m_Blocks.push_back(pBlock);
11003  if(pNewBlockIndex != VMA_NULL)
11004  {
11005  *pNewBlockIndex = m_Blocks.size() - 1;
11006  }
11007 
11008  return VK_SUCCESS;
11009 }
11010 
11011 #if VMA_STATS_STRING_ENABLED
11012 
11013 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
11014 {
11015  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11016 
11017  json.BeginObject();
11018 
11019  if(m_IsCustomPool)
11020  {
11021  json.WriteString("MemoryTypeIndex");
11022  json.WriteNumber(m_MemoryTypeIndex);
11023 
11024  json.WriteString("BlockSize");
11025  json.WriteNumber(m_PreferredBlockSize);
11026 
11027  json.WriteString("BlockCount");
11028  json.BeginObject(true);
11029  if(m_MinBlockCount > 0)
11030  {
11031  json.WriteString("Min");
11032  json.WriteNumber((uint64_t)m_MinBlockCount);
11033  }
11034  if(m_MaxBlockCount < SIZE_MAX)
11035  {
11036  json.WriteString("Max");
11037  json.WriteNumber((uint64_t)m_MaxBlockCount);
11038  }
11039  json.WriteString("Cur");
11040  json.WriteNumber((uint64_t)m_Blocks.size());
11041  json.EndObject();
11042 
11043  if(m_FrameInUseCount > 0)
11044  {
11045  json.WriteString("FrameInUseCount");
11046  json.WriteNumber(m_FrameInUseCount);
11047  }
11048 
11049  if(m_Algorithm != 0)
11050  {
11051  json.WriteString("Algorithm");
11052  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
11053  }
11054  }
11055  else
11056  {
11057  json.WriteString("PreferredBlockSize");
11058  json.WriteNumber(m_PreferredBlockSize);
11059  }
11060 
11061  json.WriteString("Blocks");
11062  json.BeginObject();
11063  for(size_t i = 0; i < m_Blocks.size(); ++i)
11064  {
11065  json.BeginString();
11066  json.ContinueString(m_Blocks[i]->GetId());
11067  json.EndString();
11068 
11069  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
11070  }
11071  json.EndObject();
11072 
11073  json.EndObject();
11074 }
11075 
11076 #endif // #if VMA_STATS_STRING_ENABLED
11077 
11078 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
11079  VmaAllocator hAllocator,
11080  uint32_t currentFrameIndex)
11081 {
11082  if(m_pDefragmentator == VMA_NULL)
11083  {
11084  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
11085  hAllocator,
11086  this,
11087  currentFrameIndex);
11088  }
11089 
11090  return m_pDefragmentator;
11091 }
11092 
11093 VkResult VmaBlockVector::Defragment(
11094  VmaDefragmentationStats* pDefragmentationStats,
11095  VkDeviceSize& maxBytesToMove,
11096  uint32_t& maxAllocationsToMove)
11097 {
11098  if(m_pDefragmentator == VMA_NULL)
11099  {
11100  return VK_SUCCESS;
11101  }
11102 
11103  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11104 
11105  // Defragment.
11106  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
11107 
11108  // Accumulate statistics.
11109  if(pDefragmentationStats != VMA_NULL)
11110  {
11111  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
11112  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
11113  pDefragmentationStats->bytesMoved += bytesMoved;
11114  pDefragmentationStats->allocationsMoved += allocationsMoved;
11115  VMA_ASSERT(bytesMoved <= maxBytesToMove);
11116  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
11117  maxBytesToMove -= bytesMoved;
11118  maxAllocationsToMove -= allocationsMoved;
11119  }
11120 
11121  // Free empty blocks.
11122  m_HasEmptyBlock = false;
11123  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11124  {
11125  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11126  if(pBlock->m_pMetadata->IsEmpty())
11127  {
11128  if(m_Blocks.size() > m_MinBlockCount)
11129  {
11130  if(pDefragmentationStats != VMA_NULL)
11131  {
11132  ++pDefragmentationStats->deviceMemoryBlocksFreed;
11133  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
11134  }
11135 
11136  VmaVectorRemove(m_Blocks, blockIndex);
11137  pBlock->Destroy(m_hAllocator);
11138  vma_delete(m_hAllocator, pBlock);
11139  }
11140  else
11141  {
11142  m_HasEmptyBlock = true;
11143  }
11144  }
11145  }
11146 
11147  return result;
11148 }
11149 
11150 void VmaBlockVector::DestroyDefragmentator()
11151 {
11152  if(m_pDefragmentator != VMA_NULL)
11153  {
11154  vma_delete(m_hAllocator, m_pDefragmentator);
11155  m_pDefragmentator = VMA_NULL;
11156  }
11157 }
11158 
11159 void VmaBlockVector::MakePoolAllocationsLost(
11160  uint32_t currentFrameIndex,
11161  size_t* pLostAllocationCount)
11162 {
11163  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11164  size_t lostAllocationCount = 0;
11165  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11166  {
11167  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11168  VMA_ASSERT(pBlock);
11169  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
11170  }
11171  if(pLostAllocationCount != VMA_NULL)
11172  {
11173  *pLostAllocationCount = lostAllocationCount;
11174  }
11175 }
11176 
11177 VkResult VmaBlockVector::CheckCorruption()
11178 {
11179  if(!IsCorruptionDetectionEnabled())
11180  {
11181  return VK_ERROR_FEATURE_NOT_PRESENT;
11182  }
11183 
11184  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11185  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11186  {
11187  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11188  VMA_ASSERT(pBlock);
11189  VkResult res = pBlock->CheckCorruption(m_hAllocator);
11190  if(res != VK_SUCCESS)
11191  {
11192  return res;
11193  }
11194  }
11195  return VK_SUCCESS;
11196 }
11197 
11198 void VmaBlockVector::AddStats(VmaStats* pStats)
11199 {
11200  const uint32_t memTypeIndex = m_MemoryTypeIndex;
11201  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11202 
11203  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11204 
11205  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11206  {
11207  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11208  VMA_ASSERT(pBlock);
11209  VMA_HEAVY_ASSERT(pBlock->Validate());
11210  VmaStatInfo allocationStatInfo;
11211  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11212  VmaAddStatInfo(pStats->total, allocationStatInfo);
11213  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11214  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11215  }
11216 }
11217 
11218 ////////////////////////////////////////////////////////////////////////////////
11219 // VmaDefragmentator members definition
11220 
11221 VmaDefragmentator::VmaDefragmentator(
11222  VmaAllocator hAllocator,
11223  VmaBlockVector* pBlockVector,
11224  uint32_t currentFrameIndex) :
11225  m_hAllocator(hAllocator),
11226  m_pBlockVector(pBlockVector),
11227  m_CurrentFrameIndex(currentFrameIndex),
11228  m_BytesMoved(0),
11229  m_AllocationsMoved(0),
11230  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
11231  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
11232 {
11233  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
11234 }
11235 
11236 VmaDefragmentator::~VmaDefragmentator()
11237 {
11238  for(size_t i = m_Blocks.size(); i--; )
11239  {
11240  vma_delete(m_hAllocator, m_Blocks[i]);
11241  }
11242 }
11243 
11244 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11245 {
11246  AllocationInfo allocInfo;
11247  allocInfo.m_hAllocation = hAlloc;
11248  allocInfo.m_pChanged = pChanged;
11249  m_Allocations.push_back(allocInfo);
11250 }
11251 
11252 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11253 {
11254  // It has already been mapped for defragmentation.
11255  if(m_pMappedDataForDefragmentation)
11256  {
11257  *ppMappedData = m_pMappedDataForDefragmentation;
11258  return VK_SUCCESS;
11259  }
11260 
11261  // It is originally mapped.
11262  if(m_pBlock->GetMappedData())
11263  {
11264  *ppMappedData = m_pBlock->GetMappedData();
11265  return VK_SUCCESS;
11266  }
11267 
11268  // Map on first usage.
11269  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11270  *ppMappedData = m_pMappedDataForDefragmentation;
11271  return res;
11272 }
11273 
11274 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11275 {
11276  if(m_pMappedDataForDefragmentation != VMA_NULL)
11277  {
11278  m_pBlock->Unmap(hAllocator, 1);
11279  }
11280 }
11281 
11282 VkResult VmaDefragmentator::DefragmentRound(
11283  VkDeviceSize maxBytesToMove,
11284  uint32_t maxAllocationsToMove)
11285 {
11286  if(m_Blocks.empty())
11287  {
11288  return VK_SUCCESS;
11289  }
11290 
11291  size_t srcBlockIndex = m_Blocks.size() - 1;
11292  size_t srcAllocIndex = SIZE_MAX;
11293  for(;;)
11294  {
11295  // 1. Find next allocation to move.
11296  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11297  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11298  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11299  {
11300  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11301  {
11302  // Finished: no more allocations to process.
11303  if(srcBlockIndex == 0)
11304  {
11305  return VK_SUCCESS;
11306  }
11307  else
11308  {
11309  --srcBlockIndex;
11310  srcAllocIndex = SIZE_MAX;
11311  }
11312  }
11313  else
11314  {
11315  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11316  }
11317  }
11318 
11319  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11320  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11321 
11322  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11323  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11324  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11325  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11326 
11327  // 2. Try to find new place for this allocation in preceding or current block.
11328  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11329  {
11330  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11331  VmaAllocationRequest dstAllocRequest;
11332  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11333  m_CurrentFrameIndex,
11334  m_pBlockVector->GetFrameInUseCount(),
11335  m_pBlockVector->GetBufferImageGranularity(),
11336  size,
11337  alignment,
11338  false, // upperAddress
11339  suballocType,
11340  false, // canMakeOtherLost
11342  &dstAllocRequest) &&
11343  MoveMakesSense(
11344  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11345  {
11346  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11347 
11348  // Reached limit on number of allocations or bytes to move.
11349  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11350  (m_BytesMoved + size > maxBytesToMove))
11351  {
11352  return VK_INCOMPLETE;
11353  }
11354 
11355  void* pDstMappedData = VMA_NULL;
11356  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11357  if(res != VK_SUCCESS)
11358  {
11359  return res;
11360  }
11361 
11362  void* pSrcMappedData = VMA_NULL;
11363  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11364  if(res != VK_SUCCESS)
11365  {
11366  return res;
11367  }
11368 
11369  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11370  memcpy(
11371  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11372  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11373  static_cast<size_t>(size));
11374 
11375  if(VMA_DEBUG_MARGIN > 0)
11376  {
11377  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11378  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11379  }
11380 
11381  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11382  dstAllocRequest,
11383  suballocType,
11384  size,
11385  false, // upperAddress
11386  allocInfo.m_hAllocation);
11387  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11388 
11389  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11390 
11391  if(allocInfo.m_pChanged != VMA_NULL)
11392  {
11393  *allocInfo.m_pChanged = VK_TRUE;
11394  }
11395 
11396  ++m_AllocationsMoved;
11397  m_BytesMoved += size;
11398 
11399  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11400 
11401  break;
11402  }
11403  }
11404 
11405  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
11406 
11407  if(srcAllocIndex > 0)
11408  {
11409  --srcAllocIndex;
11410  }
11411  else
11412  {
11413  if(srcBlockIndex > 0)
11414  {
11415  --srcBlockIndex;
11416  srcAllocIndex = SIZE_MAX;
11417  }
11418  else
11419  {
11420  return VK_SUCCESS;
11421  }
11422  }
11423  }
11424 }
11425 
11426 VkResult VmaDefragmentator::Defragment(
11427  VkDeviceSize maxBytesToMove,
11428  uint32_t maxAllocationsToMove)
11429 {
11430  if(m_Allocations.empty())
11431  {
11432  return VK_SUCCESS;
11433  }
11434 
11435  // Create block info for each block.
11436  const size_t blockCount = m_pBlockVector->m_Blocks.size();
11437  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11438  {
11439  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
11440  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
11441  m_Blocks.push_back(pBlockInfo);
11442  }
11443 
11444  // Sort them by m_pBlock pointer value.
11445  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
11446 
11447  // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
11448  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
11449  {
11450  AllocationInfo& allocInfo = m_Allocations[blockIndex];
11451  // Now, inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was lost.
11452  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11453  {
11454  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
11455  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
11456  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
11457  {
11458  (*it)->m_Allocations.push_back(allocInfo);
11459  }
11460  else
11461  {
11462  VMA_ASSERT(0);
11463  }
11464  }
11465  }
11466  m_Allocations.clear();
11467 
11468  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11469  {
11470  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
11471  pBlockInfo->CalcHasNonMovableAllocations();
11472  pBlockInfo->SortAllocationsBySizeDescecnding();
11473  }
11474 
11475  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
11476  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
11477 
11478  // Execute defragmentation rounds (the main part).
11479  VkResult result = VK_SUCCESS;
11480  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
11481  {
11482  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
11483  }
11484 
11485  // Unmap blocks that were mapped for defragmentation.
11486  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11487  {
11488  m_Blocks[blockIndex]->Unmap(m_hAllocator);
11489  }
11490 
11491  return result;
11492 }
11493 
11494 bool VmaDefragmentator::MoveMakesSense(
11495  size_t dstBlockIndex, VkDeviceSize dstOffset,
11496  size_t srcBlockIndex, VkDeviceSize srcOffset)
11497 {
11498  if(dstBlockIndex < srcBlockIndex)
11499  {
11500  return true;
11501  }
11502  if(dstBlockIndex > srcBlockIndex)
11503  {
11504  return false;
11505  }
11506  if(dstOffset < srcOffset)
11507  {
11508  return true;
11509  }
11510  return false;
11511 }
11512 
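The MoveMakesSense() implementation above orders candidate moves lexicographically: a move is accepted only if the destination (block index, offset) pair precedes the source pair, so data only ever migrates toward the front of the block list. A minimal equivalent sketch (not part of the library) makes the criterion explicit:

    #include <tuple>

    // Sketch: the if-cascade in MoveMakesSense() is one lexicographic comparison.
    static bool MoveMakesSenseSketch(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset)
    {
        // std::tie compares the (blockIndex, offset) pairs left to right.
        return std::tie(dstBlockIndex, dstOffset) < std::tie(srcBlockIndex, srcOffset);
    }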
11513 ////////////////////////////////////////////////////////////////////////////////
11514 // VmaRecorder
11515 
11516 #if VMA_RECORDING_ENABLED
11517 
11518 VmaRecorder::VmaRecorder() :
11519  m_UseMutex(true),
11520  m_Flags(0),
11521  m_File(VMA_NULL),
11522  m_Freq(INT64_MAX),
11523  m_StartCounter(INT64_MAX)
11524 {
11525 }
11526 
11527 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11528 {
11529  m_UseMutex = useMutex;
11530  m_Flags = settings.flags;
11531 
11532  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11533  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11534 
11535  // Open file for writing.
11536  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11537  if(err != 0)
11538  {
11539  return VK_ERROR_INITIALIZATION_FAILED;
11540  }
11541 
11542  // Write header.
11543  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11544  fprintf(m_File, "%s\n", "1,4");
11545 
11546  return VK_SUCCESS;
11547 }
11548 
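Init() above writes a two-line CSV header; each Record* method below then appends one comma-separated row of the form threadId,time,frameIndex,functionName[,args...]. Given these fprintf calls, the first lines of a recording file would look roughly like this (the row values shown are illustrative):

    Vulkan Memory Allocator,Calls recording
    1,4
    Config,Begin
    ...
    Config,End
    8364,0.002,0,vmaCreateAllocator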
11549 VmaRecorder::~VmaRecorder()
11550 {
11551  if(m_File != VMA_NULL)
11552  {
11553  fclose(m_File);
11554  }
11555 }
11556 
11557 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11558 {
11559  CallParams callParams;
11560  GetBasicParams(callParams);
11561 
11562  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11563  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11564  Flush();
11565 }
11566 
11567 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11568 {
11569  CallParams callParams;
11570  GetBasicParams(callParams);
11571 
11572  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11573  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11574  Flush();
11575 }
11576 
11577 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
11578 {
11579  CallParams callParams;
11580  GetBasicParams(callParams);
11581 
11582  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11583  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
11584  createInfo.memoryTypeIndex,
11585  createInfo.flags,
11586  createInfo.blockSize,
11587  (uint64_t)createInfo.minBlockCount,
11588  (uint64_t)createInfo.maxBlockCount,
11589  createInfo.frameInUseCount,
11590  pool);
11591  Flush();
11592 }
11593 
11594 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11595 {
11596  CallParams callParams;
11597  GetBasicParams(callParams);
11598 
11599  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11600  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11601  pool);
11602  Flush();
11603 }
11604 
11605 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11606  const VkMemoryRequirements& vkMemReq,
11607  const VmaAllocationCreateInfo& createInfo,
11608  VmaAllocation allocation)
11609 {
11610  CallParams callParams;
11611  GetBasicParams(callParams);
11612 
11613  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11614  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11615  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11616  vkMemReq.size,
11617  vkMemReq.alignment,
11618  vkMemReq.memoryTypeBits,
11619  createInfo.flags,
11620  createInfo.usage,
11621  createInfo.requiredFlags,
11622  createInfo.preferredFlags,
11623  createInfo.memoryTypeBits,
11624  createInfo.pool,
11625  allocation,
11626  userDataStr.GetString());
11627  Flush();
11628 }
11629 
11630 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11631  const VkMemoryRequirements& vkMemReq,
11632  bool requiresDedicatedAllocation,
11633  bool prefersDedicatedAllocation,
11634  const VmaAllocationCreateInfo& createInfo,
11635  VmaAllocation allocation)
11636 {
11637  CallParams callParams;
11638  GetBasicParams(callParams);
11639 
11640  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11641  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11642  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11643  vkMemReq.size,
11644  vkMemReq.alignment,
11645  vkMemReq.memoryTypeBits,
11646  requiresDedicatedAllocation ? 1 : 0,
11647  prefersDedicatedAllocation ? 1 : 0,
11648  createInfo.flags,
11649  createInfo.usage,
11650  createInfo.requiredFlags,
11651  createInfo.preferredFlags,
11652  createInfo.memoryTypeBits,
11653  createInfo.pool,
11654  allocation,
11655  userDataStr.GetString());
11656  Flush();
11657 }
11658 
11659 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11660  const VkMemoryRequirements& vkMemReq,
11661  bool requiresDedicatedAllocation,
11662  bool prefersDedicatedAllocation,
11663  const VmaAllocationCreateInfo& createInfo,
11664  VmaAllocation allocation)
11665 {
11666  CallParams callParams;
11667  GetBasicParams(callParams);
11668 
11669  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11670  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11671  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11672  vkMemReq.size,
11673  vkMemReq.alignment,
11674  vkMemReq.memoryTypeBits,
11675  requiresDedicatedAllocation ? 1 : 0,
11676  prefersDedicatedAllocation ? 1 : 0,
11677  createInfo.flags,
11678  createInfo.usage,
11679  createInfo.requiredFlags,
11680  createInfo.preferredFlags,
11681  createInfo.memoryTypeBits,
11682  createInfo.pool,
11683  allocation,
11684  userDataStr.GetString());
11685  Flush();
11686 }
11687 
11688 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11689  VmaAllocation allocation)
11690 {
11691  CallParams callParams;
11692  GetBasicParams(callParams);
11693 
11694  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11695  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11696  allocation);
11697  Flush();
11698 }
11699 
11700 void VmaRecorder::RecordResizeAllocation(
11701  uint32_t frameIndex,
11702  VmaAllocation allocation,
11703  VkDeviceSize newSize)
11704 {
11705  CallParams callParams;
11706  GetBasicParams(callParams);
11707 
11708  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11709  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
11710  allocation, newSize);
11711  Flush();
11712 }
11713 
11714 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11715  VmaAllocation allocation,
11716  const void* pUserData)
11717 {
11718  CallParams callParams;
11719  GetBasicParams(callParams);
11720 
11721  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11722  UserDataString userDataStr(
11723  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11724  pUserData);
11725  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11726  allocation,
11727  userDataStr.GetString());
11728  Flush();
11729 }
11730 
11731 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11732  VmaAllocation allocation)
11733 {
11734  CallParams callParams;
11735  GetBasicParams(callParams);
11736 
11737  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11738  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11739  allocation);
11740  Flush();
11741 }
11742 
11743 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11744  VmaAllocation allocation)
11745 {
11746  CallParams callParams;
11747  GetBasicParams(callParams);
11748 
11749  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11750  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11751  allocation);
11752  Flush();
11753 }
11754 
11755 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11756  VmaAllocation allocation)
11757 {
11758  CallParams callParams;
11759  GetBasicParams(callParams);
11760 
11761  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11762  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11763  allocation);
11764  Flush();
11765 }
11766 
11767 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11768  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11769 {
11770  CallParams callParams;
11771  GetBasicParams(callParams);
11772 
11773  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11774  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11775  allocation,
11776  offset,
11777  size);
11778  Flush();
11779 }
11780 
11781 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11782  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11783 {
11784  CallParams callParams;
11785  GetBasicParams(callParams);
11786 
11787  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11788  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11789  allocation,
11790  offset,
11791  size);
11792  Flush();
11793 }
11794 
11795 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11796  const VkBufferCreateInfo& bufCreateInfo,
11797  const VmaAllocationCreateInfo& allocCreateInfo,
11798  VmaAllocation allocation)
11799 {
11800  CallParams callParams;
11801  GetBasicParams(callParams);
11802 
11803  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11804  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11805  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11806  bufCreateInfo.flags,
11807  bufCreateInfo.size,
11808  bufCreateInfo.usage,
11809  bufCreateInfo.sharingMode,
11810  allocCreateInfo.flags,
11811  allocCreateInfo.usage,
11812  allocCreateInfo.requiredFlags,
11813  allocCreateInfo.preferredFlags,
11814  allocCreateInfo.memoryTypeBits,
11815  allocCreateInfo.pool,
11816  allocation,
11817  userDataStr.GetString());
11818  Flush();
11819 }
11820 
11821 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11822  const VkImageCreateInfo& imageCreateInfo,
11823  const VmaAllocationCreateInfo& allocCreateInfo,
11824  VmaAllocation allocation)
11825 {
11826  CallParams callParams;
11827  GetBasicParams(callParams);
11828 
11829  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11830  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11831  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11832  imageCreateInfo.flags,
11833  imageCreateInfo.imageType,
11834  imageCreateInfo.format,
11835  imageCreateInfo.extent.width,
11836  imageCreateInfo.extent.height,
11837  imageCreateInfo.extent.depth,
11838  imageCreateInfo.mipLevels,
11839  imageCreateInfo.arrayLayers,
11840  imageCreateInfo.samples,
11841  imageCreateInfo.tiling,
11842  imageCreateInfo.usage,
11843  imageCreateInfo.sharingMode,
11844  imageCreateInfo.initialLayout,
11845  allocCreateInfo.flags,
11846  allocCreateInfo.usage,
11847  allocCreateInfo.requiredFlags,
11848  allocCreateInfo.preferredFlags,
11849  allocCreateInfo.memoryTypeBits,
11850  allocCreateInfo.pool,
11851  allocation,
11852  userDataStr.GetString());
11853  Flush();
11854 }
11855 
11856 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11857  VmaAllocation allocation)
11858 {
11859  CallParams callParams;
11860  GetBasicParams(callParams);
11861 
11862  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11863  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11864  allocation);
11865  Flush();
11866 }
11867 
11868 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11869  VmaAllocation allocation)
11870 {
11871  CallParams callParams;
11872  GetBasicParams(callParams);
11873 
11874  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11875  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11876  allocation);
11877  Flush();
11878 }
11879 
11880 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11881  VmaAllocation allocation)
11882 {
11883  CallParams callParams;
11884  GetBasicParams(callParams);
11885 
11886  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11887  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11888  allocation);
11889  Flush();
11890 }
11891 
11892 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11893  VmaAllocation allocation)
11894 {
11895  CallParams callParams;
11896  GetBasicParams(callParams);
11897 
11898  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11899  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11900  allocation);
11901  Flush();
11902 }
11903 
11904 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11905  VmaPool pool)
11906 {
11907  CallParams callParams;
11908  GetBasicParams(callParams);
11909 
11910  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11911  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11912  pool);
11913  Flush();
11914 }
11915 
11916 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11917 {
11918  if(pUserData != VMA_NULL)
11919  {
11920  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11921  {
11922  m_Str = (const char*)pUserData;
11923  }
11924  else
11925  {
11926  sprintf_s(m_PtrStr, "%p", pUserData);
11927  m_Str = m_PtrStr;
11928  }
11929  }
11930  else
11931  {
11932  m_Str = "";
11933  }
11934 }
11935 
11936 void VmaRecorder::WriteConfiguration(
11937  const VkPhysicalDeviceProperties& devProps,
11938  const VkPhysicalDeviceMemoryProperties& memProps,
11939  bool dedicatedAllocationExtensionEnabled)
11940 {
11941  fprintf(m_File, "Config,Begin\n");
11942 
11943  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
11944  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
11945  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
11946  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
11947  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
11948  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
11949 
11950  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
11951  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
11952  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
11953 
11954  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
11955  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
11956  {
11957  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
11958  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
11959  }
11960  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
11961  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
11962  {
11963  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
11964  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
11965  }
11966 
11967  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
11968 
11969  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
11970  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
11971  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
11972  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
11973  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
11974  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
11975  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
11976  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
11977  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11978 
11979  fprintf(m_File, "Config,End\n");
11980 }
11981 
11982 void VmaRecorder::GetBasicParams(CallParams& outParams)
11983 {
11984  outParams.threadId = GetCurrentThreadId();
11985 
11986  LARGE_INTEGER counter;
11987  QueryPerformanceCounter(&counter);
11988  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11989 }
11990 
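GetBasicParams() above stamps each row with (counter - m_StartCounter) / m_Freq, i.e. seconds elapsed since VmaRecorder::Init(), using the Win32 QueryPerformanceCounter API (recording is only compiled in on Windows, as GetCurrentThreadId() also shows). A portable sketch of the same measurement using <chrono> - an assumption for illustration, not what the library uses:

    #include <chrono>

    // Seconds since a fixed start point, as a double - mirrors outParams.time.
    static const auto g_RecordStart = std::chrono::steady_clock::now();

    static double NowSeconds()
    {
        return std::chrono::duration<double>(
            std::chrono::steady_clock::now() - g_RecordStart).count();
    }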
11991 void VmaRecorder::Flush()
11992 {
11993  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11994  {
11995  fflush(m_File);
11996  }
11997 }
11998 
11999 #endif // #if VMA_RECORDING_ENABLED
12000 
12001 ////////////////////////////////////////////////////////////////////////////////
12002 // VmaAllocator_T
12003 
12004 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
12005  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
12006  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
12007  m_hDevice(pCreateInfo->device),
12008  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
12009  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
12010  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
12011  m_PreferredLargeHeapBlockSize(0),
12012  m_PhysicalDevice(pCreateInfo->physicalDevice),
12013  m_CurrentFrameIndex(0),
12014  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
12015  m_NextPoolId(0)
12016 #if VMA_RECORDING_ENABLED
12017  ,m_pRecorder(VMA_NULL)
12018 #endif
12019 {
12020  if(VMA_DEBUG_DETECT_CORRUPTION)
12021  {
12022  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
12023  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
12024  }
12025 
12026  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
12027 
12028 #if !(VMA_DEDICATED_ALLOCATION)
12029  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
12030  {
12031  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
12032  }
12033 #endif
12034 
12035  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
12036  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
12037  memset(&m_MemProps, 0, sizeof(m_MemProps));
12038 
12039  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
12040  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
12041 
12042  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12043  {
12044  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
12045  }
12046 
12047  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
12048  {
12049  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
12050  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
12051  }
12052 
12053  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
12054 
12055  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
12056  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
12057 
12058  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
12059  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
12060  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
12061  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
12062 
12063  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
12064  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
12065 
12066  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
12067  {
12068  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
12069  {
12070  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
12071  if(limit != VK_WHOLE_SIZE)
12072  {
12073  m_HeapSizeLimit[heapIndex] = limit;
12074  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
12075  {
12076  m_MemProps.memoryHeaps[heapIndex].size = limit;
12077  }
12078  }
12079  }
12080  }
12081 
12082  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12083  {
12084  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
12085 
12086  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
12087  this,
12088  memTypeIndex,
12089  preferredBlockSize,
12090  0,
12091  SIZE_MAX,
12092  GetBufferImageGranularity(),
12093  pCreateInfo->frameInUseCount,
12094  false, // isCustomPool
12095  false, // explicitBlockSize
12096  false); // linearAlgorithm
12097  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
12098  // because minBlockCount is 0.
12099  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
12100 
12101  }
12102 }
12103 
12104 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
12105 {
12106  VkResult res = VK_SUCCESS;
12107 
12108  if(pCreateInfo->pRecordSettings != VMA_NULL &&
12109  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
12110  {
12111 #if VMA_RECORDING_ENABLED
12112  m_pRecorder = vma_new(this, VmaRecorder)();
12113  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
12114  if(res != VK_SUCCESS)
12115  {
12116  return res;
12117  }
12118  m_pRecorder->WriteConfiguration(
12119  m_PhysicalDeviceProperties,
12120  m_MemProps,
12121  m_UseKhrDedicatedAllocation);
12122  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
12123 #else
12124  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
12125  return VK_ERROR_FEATURE_NOT_PRESENT;
12126 #endif
12127  }
12128 
12129  return res;
12130 }
12131 
12132 VmaAllocator_T::~VmaAllocator_T()
12133 {
12134 #if VMA_RECORDING_ENABLED
12135  if(m_pRecorder != VMA_NULL)
12136  {
12137  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
12138  vma_delete(this, m_pRecorder);
12139  }
12140 #endif
12141 
12142  VMA_ASSERT(m_Pools.empty());
12143 
12144  for(size_t i = GetMemoryTypeCount(); i--; )
12145  {
12146  vma_delete(this, m_pDedicatedAllocations[i]);
12147  vma_delete(this, m_pBlockVectors[i]);
12148  }
12149 }
12150 
12151 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
12152 {
12153 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12154  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
12155  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
12156  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
12157  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
12158  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
12159  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
12160  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
12161  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
12162  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
12163  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
12164  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
12165  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
12166  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
12167  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
12168  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
12169  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
12170 #if VMA_DEDICATED_ALLOCATION
12171  if(m_UseKhrDedicatedAllocation)
12172  {
12173  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
12174  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
12175  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
12176  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
12177  }
12178 #endif // #if VMA_DEDICATED_ALLOCATION
12179 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12180 
12181 #define VMA_COPY_IF_NOT_NULL(funcName) \
12182  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
12183 
12184  if(pVulkanFunctions != VMA_NULL)
12185  {
12186  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
12187  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
12188  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
12189  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
12190  VMA_COPY_IF_NOT_NULL(vkMapMemory);
12191  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
12192  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
12193  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
12194  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
12195  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
12196  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
12197  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
12198  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
12199  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
12200  VMA_COPY_IF_NOT_NULL(vkCreateImage);
12201  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
12202 #if VMA_DEDICATED_ALLOCATION
12203  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
12204  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
12205 #endif
12206  }
12207 
12208 #undef VMA_COPY_IF_NOT_NULL
12209 
12210  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
12211  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
12212  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
12213  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
12214  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
12215  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
12216  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
12217  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
12218  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
12219  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
12220  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
12221  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
12222  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
12223  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
12224  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
12225  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
12226  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
12227  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
12228 #if VMA_DEDICATED_ALLOCATION
12229  if(m_UseKhrDedicatedAllocation)
12230  {
12231  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
12232  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
12233  }
12234 #endif
12235 }
12236 
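The asserts at the end of ImportVulkanFunctions() fire when an entry point comes from neither source. A minimal usage sketch of the second option - filling VmaAllocatorCreateInfo::pVulkanFunctions explicitly (assumes statically linked Vulkan symbols and existing physicalDevice/device handles; with VMA_STATIC_VULKAN_FUNCTIONS defined to 1, this struct can be omitted entirely):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    vulkanFunctions.vkMapMemory = vkMapMemory;
    vulkanFunctions.vkUnmapMemory = vkUnmapMemory;
    vulkanFunctions.vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges;
    vulkanFunctions.vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges;
    vulkanFunctions.vkBindBufferMemory = vkBindBufferMemory;
    vulkanFunctions.vkBindImageMemory = vkBindImageMemory;
    vulkanFunctions.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
    vulkanFunctions.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
    vulkanFunctions.vkCreateBuffer = vkCreateBuffer;
    vulkanFunctions.vkDestroyBuffer = vkDestroyBuffer;
    vulkanFunctions.vkCreateImage = vkCreateImage;
    vulkanFunctions.vkDestroyImage = vkDestroyImage;

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);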
12237 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12238 {
12239  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12240  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12241  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12242  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12243 }
12244 
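A worked example of CalcPreferredBlockSize(): with the library's default values defined earlier in this header (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB), a 512 MiB heap counts as small and gets 512 MiB / 8 = 64 MiB blocks, while an 8 GiB heap gets the full 256 MiB preferred block size.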
12245 VkResult VmaAllocator_T::AllocateMemoryOfType(
12246  VkDeviceSize size,
12247  VkDeviceSize alignment,
12248  bool dedicatedAllocation,
12249  VkBuffer dedicatedBuffer,
12250  VkImage dedicatedImage,
12251  const VmaAllocationCreateInfo& createInfo,
12252  uint32_t memTypeIndex,
12253  VmaSuballocationType suballocType,
12254  VmaAllocation* pAllocation)
12255 {
12256  VMA_ASSERT(pAllocation != VMA_NULL);
12257  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
12258 
12259  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12260 
12261  // If memory type is not HOST_VISIBLE, disable MAPPED.
12262  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12263  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12264  {
12265  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12266  }
12267 
12268  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12269  VMA_ASSERT(blockVector);
12270 
12271  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12272  bool preferDedicatedMemory =
12273  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12274  dedicatedAllocation ||
12275  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
12276  size > preferredBlockSize / 2;
12277 
12278  if(preferDedicatedMemory &&
12279  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12280  finalCreateInfo.pool == VK_NULL_HANDLE)
12281  {
12282  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
12283  }
12284 
12285  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12286  {
12287  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12288  {
12289  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12290  }
12291  else
12292  {
12293  return AllocateDedicatedMemory(
12294  size,
12295  suballocType,
12296  memTypeIndex,
12297  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12298  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12299  finalCreateInfo.pUserData,
12300  dedicatedBuffer,
12301  dedicatedImage,
12302  pAllocation);
12303  }
12304  }
12305  else
12306  {
12307  VkResult res = blockVector->Allocate(
12308  VK_NULL_HANDLE, // hCurrentPool
12309  m_CurrentFrameIndex.load(),
12310  size,
12311  alignment,
12312  finalCreateInfo,
12313  suballocType,
12314  pAllocation);
12315  if(res == VK_SUCCESS)
12316  {
12317  return res;
12318  }
12319 
12320  // Allocation from a block failed. Try dedicated memory.
12321  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12322  {
12323  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12324  }
12325  else
12326  {
12327  res = AllocateDedicatedMemory(
12328  size,
12329  suballocType,
12330  memTypeIndex,
12331  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12332  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12333  finalCreateInfo.pUserData,
12334  dedicatedBuffer,
12335  dedicatedImage,
12336  pAllocation);
12337  if(res == VK_SUCCESS)
12338  {
12339  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
12340  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12341  return VK_SUCCESS;
12342  }
12343  else
12344  {
12345  // Everything failed: Return error code.
12346  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12347  return res;
12348  }
12349  }
12350  }
12351 }
12352 
12353 VkResult VmaAllocator_T::AllocateDedicatedMemory(
12354  VkDeviceSize size,
12355  VmaSuballocationType suballocType,
12356  uint32_t memTypeIndex,
12357  bool map,
12358  bool isUserDataString,
12359  void* pUserData,
12360  VkBuffer dedicatedBuffer,
12361  VkImage dedicatedImage,
12362  VmaAllocation* pAllocation)
12363 {
12364  VMA_ASSERT(pAllocation);
12365 
12366  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12367  allocInfo.memoryTypeIndex = memTypeIndex;
12368  allocInfo.allocationSize = size;
12369 
12370 #if VMA_DEDICATED_ALLOCATION
12371  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
12372  if(m_UseKhrDedicatedAllocation)
12373  {
12374  if(dedicatedBuffer != VK_NULL_HANDLE)
12375  {
12376  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
12377  dedicatedAllocInfo.buffer = dedicatedBuffer;
12378  allocInfo.pNext = &dedicatedAllocInfo;
12379  }
12380  else if(dedicatedImage != VK_NULL_HANDLE)
12381  {
12382  dedicatedAllocInfo.image = dedicatedImage;
12383  allocInfo.pNext = &dedicatedAllocInfo;
12384  }
12385  }
12386 #endif // #if VMA_DEDICATED_ALLOCATION
12387 
12388  // Allocate VkDeviceMemory.
12389  VkDeviceMemory hMemory = VK_NULL_HANDLE;
12390  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
12391  if(res < 0)
12392  {
12393  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12394  return res;
12395  }
12396 
12397  void* pMappedData = VMA_NULL;
12398  if(map)
12399  {
12400  res = (*m_VulkanFunctions.vkMapMemory)(
12401  m_hDevice,
12402  hMemory,
12403  0,
12404  VK_WHOLE_SIZE,
12405  0,
12406  &pMappedData);
12407  if(res < 0)
12408  {
12409  VMA_DEBUG_LOG(" vkMapMemory FAILED");
12410  FreeVulkanMemory(memTypeIndex, size, hMemory);
12411  return res;
12412  }
12413  }
12414 
12415  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
12416  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
12417  (*pAllocation)->SetUserData(this, pUserData);
12418  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12419  {
12420  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12421  }
12422 
12423  // Register it in m_pDedicatedAllocations.
12424  {
12425  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12426  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12427  VMA_ASSERT(pDedicatedAllocations);
12428  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
12429  }
12430 
12431  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
12432 
12433  return VK_SUCCESS;
12434 }
12435 
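From the public API, this dedicated-allocation path can be forced per allocation. A short sketch (assuming an existing allocator and a filled bufCreateInfo):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // Forces the AllocateDedicatedMemory() path: one VkDeviceMemory per resource.
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);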
12436 void VmaAllocator_T::GetBufferMemoryRequirements(
12437  VkBuffer hBuffer,
12438  VkMemoryRequirements& memReq,
12439  bool& requiresDedicatedAllocation,
12440  bool& prefersDedicatedAllocation) const
12441 {
12442 #if VMA_DEDICATED_ALLOCATION
12443  if(m_UseKhrDedicatedAllocation)
12444  {
12445  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
12446  memReqInfo.buffer = hBuffer;
12447 
12448  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12449 
12450  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12451  memReq2.pNext = &memDedicatedReq;
12452 
12453  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12454 
12455  memReq = memReq2.memoryRequirements;
12456  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12457  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12458  }
12459  else
12460 #endif // #if VMA_DEDICATED_ALLOCATION
12461  {
12462  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
12463  requiresDedicatedAllocation = false;
12464  prefersDedicatedAllocation = false;
12465  }
12466 }
12467 
12468 void VmaAllocator_T::GetImageMemoryRequirements(
12469  VkImage hImage,
12470  VkMemoryRequirements& memReq,
12471  bool& requiresDedicatedAllocation,
12472  bool& prefersDedicatedAllocation) const
12473 {
12474 #if VMA_DEDICATED_ALLOCATION
12475  if(m_UseKhrDedicatedAllocation)
12476  {
12477  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
12478  memReqInfo.image = hImage;
12479 
12480  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12481 
12482  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12483  memReq2.pNext = &memDedicatedReq;
12484 
12485  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12486 
12487  memReq = memReq2.memoryRequirements;
12488  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12489  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12490  }
12491  else
12492 #endif // #if VMA_DEDICATED_ALLOCATION
12493  {
12494  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
12495  requiresDedicatedAllocation = false;
12496  prefersDedicatedAllocation = false;
12497  }
12498 }
12499 
12500 VkResult VmaAllocator_T::AllocateMemory(
12501  const VkMemoryRequirements& vkMemReq,
12502  bool requiresDedicatedAllocation,
12503  bool prefersDedicatedAllocation,
12504  VkBuffer dedicatedBuffer,
12505  VkImage dedicatedImage,
12506  const VmaAllocationCreateInfo& createInfo,
12507  VmaSuballocationType suballocType,
12508  VmaAllocation* pAllocation)
12509 {
12510  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12511 
12512  if(vkMemReq.size == 0)
12513  {
12514  return VK_ERROR_VALIDATION_FAILED_EXT;
12515  }
12516  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12517  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12518  {
12519  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12520  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12521  }
12522  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12523  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
12524  {
12525  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12526  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12527  }
12528  if(requiresDedicatedAllocation)
12529  {
12530  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12531  {
12532  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12533  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12534  }
12535  if(createInfo.pool != VK_NULL_HANDLE)
12536  {
12537  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12538  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12539  }
12540  }
12541  if((createInfo.pool != VK_NULL_HANDLE) &&
12542  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12543  {
12544  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12545  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12546  }
12547 
12548  if(createInfo.pool != VK_NULL_HANDLE)
12549  {
12550  const VkDeviceSize alignmentForPool = VMA_MAX(
12551  vkMemReq.alignment,
12552  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12553  return createInfo.pool->m_BlockVector.Allocate(
12554  createInfo.pool,
12555  m_CurrentFrameIndex.load(),
12556  vkMemReq.size,
12557  alignmentForPool,
12558  createInfo,
12559  suballocType,
12560  pAllocation);
12561  }
12562  else
12563  {
12564  // Bit mask of Vulkan memory types acceptable for this allocation.
12565  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12566  uint32_t memTypeIndex = UINT32_MAX;
12567  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12568  if(res == VK_SUCCESS)
12569  {
12570  VkDeviceSize alignmentForMemType = VMA_MAX(
12571  vkMemReq.alignment,
12572  GetMemoryTypeMinAlignment(memTypeIndex));
12573 
12574  res = AllocateMemoryOfType(
12575  vkMemReq.size,
12576  alignmentForMemType,
12577  requiresDedicatedAllocation || prefersDedicatedAllocation,
12578  dedicatedBuffer,
12579  dedicatedImage,
12580  createInfo,
12581  memTypeIndex,
12582  suballocType,
12583  pAllocation);
12584  // Succeeded on first try.
12585  if(res == VK_SUCCESS)
12586  {
12587  return res;
12588  }
12589  // Allocation from this memory type failed. Try other compatible memory types.
12590  else
12591  {
12592  for(;;)
12593  {
12594  // Remove old memTypeIndex from list of possibilities.
12595  memoryTypeBits &= ~(1u << memTypeIndex);
12596  // Find alternative memTypeIndex.
12597  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12598  if(res == VK_SUCCESS)
12599  {
12600  alignmentForMemType = VMA_MAX(
12601  vkMemReq.alignment,
12602  GetMemoryTypeMinAlignment(memTypeIndex));
12603 
12604  res = AllocateMemoryOfType(
12605  vkMemReq.size,
12606  alignmentForMemType,
12607  requiresDedicatedAllocation || prefersDedicatedAllocation,
12608  dedicatedBuffer,
12609  dedicatedImage,
12610  createInfo,
12611  memTypeIndex,
12612  suballocType,
12613  pAllocation);
12614  // Allocation from this alternative memory type succeeded.
12615  if(res == VK_SUCCESS)
12616  {
12617  return res;
12618  }
12619  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12620  }
12621  // No other matching memory type index could be found.
12622  else
12623  {
12624  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12625  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12626  }
12627  }
12628  }
12629  }
12630  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12631  else
12632  return res;
12633  }
12634 }
12635 
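The fallback loop in AllocateMemory() narrows memoryTypeBits one bit per failed attempt. An illustration with hypothetical values:

    uint32_t memoryTypeBits = 0x0B;          // 0b1011: types 0, 1 and 3 acceptable
    uint32_t memTypeIndex = 1;               // first choice, but allocation failed
    memoryTypeBits &= ~(1u << memTypeIndex); // 0x09 (0b1001): only types 0 and 3 remain
    // vmaFindMemoryTypeIndex() is then queried again with the reduced mask.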
12636 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
12637 {
12638  VMA_ASSERT(allocation);
12639 
12640  if(TouchAllocation(allocation))
12641  {
12642  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12643  {
12644  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12645  }
12646 
12647  switch(allocation->GetType())
12648  {
12649  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12650  {
12651  VmaBlockVector* pBlockVector = VMA_NULL;
12652  VmaPool hPool = allocation->GetPool();
12653  if(hPool != VK_NULL_HANDLE)
12654  {
12655  pBlockVector = &hPool->m_BlockVector;
12656  }
12657  else
12658  {
12659  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12660  pBlockVector = m_pBlockVectors[memTypeIndex];
12661  }
12662  pBlockVector->Free(allocation);
12663  }
12664  break;
12665  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12666  FreeDedicatedMemory(allocation);
12667  break;
12668  default:
12669  VMA_ASSERT(0);
12670  }
12671  }
12672 
12673  allocation->SetUserData(this, VMA_NULL);
12674  vma_delete(this, allocation);
12675 }
12676 
12677 VkResult VmaAllocator_T::ResizeAllocation(
12678  const VmaAllocation alloc,
12679  VkDeviceSize newSize)
12680 {
12681  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
12682  {
12683  return VK_ERROR_VALIDATION_FAILED_EXT;
12684  }
12685  if(newSize == alloc->GetSize())
12686  {
12687  return VK_SUCCESS;
12688  }
12689 
12690  switch(alloc->GetType())
12691  {
12692  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12693  return VK_ERROR_FEATURE_NOT_PRESENT;
12694  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12695  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
12696  {
12697  alloc->ChangeSize(newSize);
12698  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
12699  return VK_SUCCESS;
12700  }
12701  else
12702  {
12703  return VK_ERROR_OUT_OF_POOL_MEMORY;
12704  }
12705  default:
12706  VMA_ASSERT(0);
12707  return VK_ERROR_VALIDATION_FAILED_EXT;
12708  }
12709 }
12710 
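Resizing succeeds only for block suballocations, and only when the block's metadata can grow or shrink the suballocation in place; dedicated allocations always return VK_ERROR_FEATURE_NOT_PRESENT. A usage sketch through the public entry point (allocator, alloc and newSizeInBytes are assumed to exist):

    VkResult res = vmaResizeAllocation(allocator, alloc, newSizeInBytes);
    if(res == VK_ERROR_OUT_OF_POOL_MEMORY)
    {
        // No free space directly after the suballocation - fall back to
        // allocating a new buffer and copying.
    }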
12711 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12712 {
12713  // Initialize.
12714  InitStatInfo(pStats->total);
12715  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12716  InitStatInfo(pStats->memoryType[i]);
12717  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12718  InitStatInfo(pStats->memoryHeap[i]);
12719 
12720  // Process default pools.
12721  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12722  {
12723  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12724  VMA_ASSERT(pBlockVector);
12725  pBlockVector->AddStats(pStats);
12726  }
12727 
12728  // Process custom pools.
12729  {
12730  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12731  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12732  {
12733  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12734  }
12735  }
12736 
12737  // Process dedicated allocations.
12738  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12739  {
12740  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12741  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12742  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12743  VMA_ASSERT(pDedicatedAllocVector);
12744  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12745  {
12746  VmaStatInfo allocationStatInfo;
12747  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12748  VmaAddStatInfo(pStats->total, allocationStatInfo);
12749  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12750  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12751  }
12752  }
12753 
12754  // Postprocess.
12755  VmaPostprocessCalcStatInfo(pStats->total);
12756  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12757  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12758  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12759  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12760 }
12761 
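CalculateStats() is reached via the public vmaCalculateStats(). A minimal sketch that reports total used and unused bytes (assuming an existing allocator):

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used: %llu B, unused: %llu B in %u blocks\n",
        stats.total.usedBytes,
        stats.total.unusedBytes,
        stats.total.blockCount);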
12762 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12763 
12764 VkResult VmaAllocator_T::Defragment(
12765  VmaAllocation* pAllocations,
12766  size_t allocationCount,
12767  VkBool32* pAllocationsChanged,
12768  const VmaDefragmentationInfo* pDefragmentationInfo,
12769  VmaDefragmentationStats* pDefragmentationStats)
12770 {
12771  if(pAllocationsChanged != VMA_NULL)
12772  {
12773  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
12774  }
12775  if(pDefragmentationStats != VMA_NULL)
12776  {
12777  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12778  }
12779 
12780  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12781 
12782  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12783 
12784  const size_t poolCount = m_Pools.size();
12785 
12786  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12787  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12788  {
12789  VmaAllocation hAlloc = pAllocations[allocIndex];
12790  VMA_ASSERT(hAlloc);
12791  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12792  // DedicatedAlloc cannot be defragmented.
12793  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12794  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12795  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12796  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12797  // Lost allocation cannot be defragmented.
12798  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12799  {
12800  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12801 
12802  const VmaPool hAllocPool = hAlloc->GetPool();
12803  // This allocation belongs to a custom pool.
12804  if(hAllocPool != VK_NULL_HANDLE)
12805  {
12806  // Pools with linear or buddy algorithm are not defragmented.
12807  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12808  {
12809  pAllocBlockVector = &hAllocPool->m_BlockVector;
12810  }
12811  }
12812  // This allocation belongs to the general pool.
12813  else
12814  {
12815  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12816  }
12817 
12818  if(pAllocBlockVector != VMA_NULL)
12819  {
12820  VmaDefragmentator* const pDefragmentator =
12821  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12822  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12823  &pAllocationsChanged[allocIndex] : VMA_NULL;
12824  pDefragmentator->AddAllocation(hAlloc, pChanged);
12825  }
12826  }
12827  }
12828 
12829  VkResult result = VK_SUCCESS;
12830 
12831  // ======== Main processing.
12832 
12833  VkDeviceSize maxBytesToMove = SIZE_MAX;
12834  uint32_t maxAllocationsToMove = UINT32_MAX;
12835  if(pDefragmentationInfo != VMA_NULL)
12836  {
12837  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12838  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12839  }
12840 
12841  // Process standard memory.
12842  for(uint32_t memTypeIndex = 0;
12843  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12844  ++memTypeIndex)
12845  {
12846  // Only HOST_VISIBLE memory types can be defragmented.
12847  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12848  {
12849  result = m_pBlockVectors[memTypeIndex]->Defragment(
12850  pDefragmentationStats,
12851  maxBytesToMove,
12852  maxAllocationsToMove);
12853  }
12854  }
12855 
12856  // Process custom pools.
12857  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12858  {
12859  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12860  pDefragmentationStats,
12861  maxBytesToMove,
12862  maxAllocationsToMove);
12863  }
12864 
12865  // ======== Destroy defragmentators.
12866 
12867  // Process custom pools.
12868  for(size_t poolIndex = poolCount; poolIndex--; )
12869  {
12870  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12871  }
12872 
12873  // Process standard memory.
12874  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12875  {
12876  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12877  {
12878  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12879  }
12880  }
12881 
12882  return result;
12883 }
12884 
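As the filter earlier in Defragment() shows, only non-lost block allocations in HOST_VISIBLE | HOST_COHERENT memory are considered. A typical call through the public entry point - a sketch assuming the caller owns an allocs array of allocCount handles:

    #include <vector>

    std::vector<VkBool32> changed(allocCount);
    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(
        allocator, allocs, allocCount, changed.data(),
        nullptr, // pDefragmentationInfo: default move limits
        &defragStats);
    // Where changed[i] == VK_TRUE, the allocation moved: recreate buffers/images
    // bound to it and rebind them at the new offset.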
12885 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
12886 {
12887  if(hAllocation->CanBecomeLost())
12888  {
12889  /*
12890  Warning: This is a carefully designed algorithm.
12891  Do not modify unless you really know what you're doing :)
12892  */
12893  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12894  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12895  for(;;)
12896  {
12897  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12898  {
12899  pAllocationInfo->memoryType = UINT32_MAX;
12900  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
12901  pAllocationInfo->offset = 0;
12902  pAllocationInfo->size = hAllocation->GetSize();
12903  pAllocationInfo->pMappedData = VMA_NULL;
12904  pAllocationInfo->pUserData = hAllocation->GetUserData();
12905  return;
12906  }
12907  else if(localLastUseFrameIndex == localCurrFrameIndex)
12908  {
12909  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12910  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12911  pAllocationInfo->offset = hAllocation->GetOffset();
12912  pAllocationInfo->size = hAllocation->GetSize();
12913  pAllocationInfo->pMappedData = VMA_NULL;
12914  pAllocationInfo->pUserData = hAllocation->GetUserData();
12915  return;
12916  }
12917  else // Last use time earlier than current time.
12918  {
12919  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12920  {
12921  localLastUseFrameIndex = localCurrFrameIndex;
12922  }
12923  }
12924  }
12925  }
12926  else
12927  {
12928 #if VMA_STATS_STRING_ENABLED
12929  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12930  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12931  for(;;)
12932  {
12933  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12934  if(localLastUseFrameIndex == localCurrFrameIndex)
12935  {
12936  break;
12937  }
12938  else // Last use time earlier than current time.
12939  {
12940  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12941  {
12942  localLastUseFrameIndex = localCurrFrameIndex;
12943  }
12944  }
12945  }
12946 #endif
12947 
12948  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12949  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12950  pAllocationInfo->offset = hAllocation->GetOffset();
12951  pAllocationInfo->size = hAllocation->GetSize();
12952  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
12953  pAllocationInfo->pUserData = hAllocation->GetUserData();
12954  }
12955 }
12956 
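The loop above is a standard compare-exchange retry: it keeps trying to advance the allocation's last-use frame index to the current frame until it either observes the allocation as lost, or the update (by this or another thread) lands. The pattern in isolation, as a sketch:

    #include <atomic>

    // Advance 'lastUse' to 'curr' unless the slot is marked LOST; false means lost.
    static bool TouchSketch(std::atomic<uint32_t>& lastUse, uint32_t curr, uint32_t LOST)
    {
        uint32_t observed = lastUse.load();
        for(;;)
        {
            if(observed == LOST) return false; // lost: give up
            if(observed == curr) return true;  // already touched this frame
            // On failure, 'observed' is refreshed and we retry.
            if(lastUse.compare_exchange_weak(observed, curr)) return true;
        }
    }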
12957 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
12958 {
12959  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
12960  if(hAllocation->CanBecomeLost())
12961  {
12962  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12963  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12964  for(;;)
12965  {
12966  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12967  {
12968  return false;
12969  }
12970  else if(localLastUseFrameIndex == localCurrFrameIndex)
12971  {
12972  return true;
12973  }
12974  else // Last use time earlier than current time.
12975  {
12976  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12977  {
12978  localLastUseFrameIndex = localCurrFrameIndex;
12979  }
12980  }
12981  }
12982  }
12983  else
12984  {
12985 #if VMA_STATS_STRING_ENABLED
12986  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12987  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12988  for(;;)
12989  {
12990  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12991  if(localLastUseFrameIndex == localCurrFrameIndex)
12992  {
12993  break;
12994  }
12995  else // Last use time earlier than current time.
12996  {
12997  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12998  {
12999  localLastUseFrameIndex = localCurrFrameIndex;
13000  }
13001  }
13002  }
13003 #endif
13004 
13005  return true;
13006  }
13007 }
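
TouchAllocation and GetAllocationInfo above implement the lost-allocation protocol: an allocation created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT stays valid only while its last-use frame index is recent enough. A minimal sketch of the intended per-frame usage, assuming an initialized allocator and a buffer/allocation pair created with that flag (bufCreateInfo, allocCreateInfo, buf, alloc and frameIndex are illustrative names, not part of the listing):

// Hedged sketch, not part of the library source:
vmaSetCurrentFrameIndex(allocator, frameIndex); // advance the allocator's frame counter
if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
{
    // The allocation was lost; its memory may already belong to another allocation.
    vmaDestroyBuffer(allocator, buf, alloc);
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
    // ...re-upload the buffer's contents here...
}
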
13008 
13009 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
13010 {
13011  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
13012 
13013  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
13014 
13015  if(newCreateInfo.maxBlockCount == 0)
13016  {
13017  newCreateInfo.maxBlockCount = SIZE_MAX;
13018  }
13019  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
13020  {
13021  return VK_ERROR_INITIALIZATION_FAILED;
13022  }
13023 
13024  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
13025 
13026  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
13027 
13028  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
13029  if(res != VK_SUCCESS)
13030  {
13031  vma_delete(this, *pPool);
13032  *pPool = VMA_NULL;
13033  return res;
13034  }
13035 
13036  // Add to m_Pools.
13037  {
13038  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13039  (*pPool)->SetId(m_NextPoolId++);
13040  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
13041  }
13042 
13043  return VK_SUCCESS;
13044 }
13045 
13046 void VmaAllocator_T::DestroyPool(VmaPool pool)
13047 {
13048  // Remove from m_Pools.
13049  {
13050  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13051  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
13052  VMA_ASSERT(success && "Pool not found in Allocator.");
13053  }
13054 
13055  vma_delete(this, pool);
13056 }
13057 
13058 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
13059 {
13060  pool->m_BlockVector.GetPoolStats(pPoolStats);
13061 }
13062 
13063 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
13064 {
13065  m_CurrentFrameIndex.store(frameIndex);
13066 }
13067 
13068 void VmaAllocator_T::MakePoolAllocationsLost(
13069  VmaPool hPool,
13070  size_t* pLostAllocationCount)
13071 {
13072  hPool->m_BlockVector.MakePoolAllocationsLost(
13073  m_CurrentFrameIndex.load(),
13074  pLostAllocationCount);
13075 }
13076 
13077 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
13078 {
13079  return hPool->m_BlockVector.CheckCorruption();
13080 }
13081 
13082 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
13083 {
13084  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
13085 
13086  // Process default pools.
13087  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13088  {
13089  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
13090  {
13091  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
13092  VMA_ASSERT(pBlockVector);
13093  VkResult localRes = pBlockVector->CheckCorruption();
13094  switch(localRes)
13095  {
13096  case VK_ERROR_FEATURE_NOT_PRESENT:
13097  break;
13098  case VK_SUCCESS:
13099  finalRes = VK_SUCCESS;
13100  break;
13101  default:
13102  return localRes;
13103  }
13104  }
13105  }
13106 
13107  // Process custom pools.
13108  {
13109  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13110  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
13111  {
13112  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
13113  {
13114  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
13115  switch(localRes)
13116  {
13117  case VK_ERROR_FEATURE_NOT_PRESENT:
13118  break;
13119  case VK_SUCCESS:
13120  finalRes = VK_SUCCESS;
13121  break;
13122  default:
13123  return localRes;
13124  }
13125  }
13126  }
13127  }
13128 
13129  return finalRes;
13130 }
13131 
13132 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
13133 {
13134  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
13135  (*pAllocation)->InitLost();
13136 }
13137 
13138 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
13139 {
13140  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
13141 
13142  VkResult res;
13143  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13144  {
13145  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13146  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
13147  {
13148  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13149  if(res == VK_SUCCESS)
13150  {
13151  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
13152  }
13153  }
13154  else
13155  {
13156  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
13157  }
13158  }
13159  else
13160  {
13161  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13162  }
13163 
13164  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
13165  {
13166  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
13167  }
13168 
13169  return res;
13170 }
13171 
13172 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
13173 {
13174  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
13175  {
13176  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
13177  }
13178 
13179  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
13180 
13181  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
13182  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13183  {
13184  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13185  m_HeapSizeLimit[heapIndex] += size;
13186  }
13187 }
13188 
13189 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
13190 {
13191  if(hAllocation->CanBecomeLost())
13192  {
13193  return VK_ERROR_MEMORY_MAP_FAILED;
13194  }
13195 
13196  switch(hAllocation->GetType())
13197  {
13198  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13199  {
13200  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13201  char *pBytes = VMA_NULL;
13202  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
13203  if(res == VK_SUCCESS)
13204  {
13205  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
13206  hAllocation->BlockAllocMap();
13207  }
13208  return res;
13209  }
13210  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13211  return hAllocation->DedicatedAllocMap(this, ppData);
13212  default:
13213  VMA_ASSERT(0);
13214  return VK_ERROR_MEMORY_MAP_FAILED;
13215  }
13216 }
13217 
13218 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
13219 {
13220  switch(hAllocation->GetType())
13221  {
13222  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13223  {
13224  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13225  hAllocation->BlockAllocUnmap();
13226  pBlock->Unmap(this, 1);
13227  }
13228  break;
13229  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13230  hAllocation->DedicatedAllocUnmap(this);
13231  break;
13232  default:
13233  VMA_ASSERT(0);
13234  }
13235 }
13236 
13237 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
13238 {
13239  VkResult res = VK_SUCCESS;
13240  switch(hAllocation->GetType())
13241  {
13242  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13243  res = GetVulkanFunctions().vkBindBufferMemory(
13244  m_hDevice,
13245  hBuffer,
13246  hAllocation->GetMemory(),
13247  0); //memoryOffset
13248  break;
13249  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13250  {
13251  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13252  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
13253  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
13254  break;
13255  }
13256  default:
13257  VMA_ASSERT(0);
13258  }
13259  return res;
13260 }
13261 
13262 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
13263 {
13264  VkResult res = VK_SUCCESS;
13265  switch(hAllocation->GetType())
13266  {
13267  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13268  res = GetVulkanFunctions().vkBindImageMemory(
13269  m_hDevice,
13270  hImage,
13271  hAllocation->GetMemory(),
13272  0); //memoryOffset
13273  break;
13274  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13275  {
13276  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13277  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13278  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13279  break;
13280  }
13281  default:
13282  VMA_ASSERT(0);
13283  }
13284  return res;
13285 }
13286 
13287 void VmaAllocator_T::FlushOrInvalidateAllocation(
13288  VmaAllocation hAllocation,
13289  VkDeviceSize offset, VkDeviceSize size,
13290  VMA_CACHE_OPERATION op)
13291 {
13292  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
13293  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
13294  {
13295  const VkDeviceSize allocationSize = hAllocation->GetSize();
13296  VMA_ASSERT(offset <= allocationSize);
13297 
13298  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13299 
13300  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13301  memRange.memory = hAllocation->GetMemory();
13302 
13303  switch(hAllocation->GetType())
13304  {
13305  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13306  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13307  if(size == VK_WHOLE_SIZE)
13308  {
13309  memRange.size = allocationSize - memRange.offset;
13310  }
13311  else
13312  {
13313  VMA_ASSERT(offset + size <= allocationSize);
13314  memRange.size = VMA_MIN(
13315  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
13316  allocationSize - memRange.offset);
13317  }
13318  break;
13319 
13320  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13321  {
13322  // 1. Compute the range, still relative to this allocation.
13323  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13324  if(size == VK_WHOLE_SIZE)
13325  {
13326  size = allocationSize - offset;
13327  }
13328  else
13329  {
13330  VMA_ASSERT(offset + size <= allocationSize);
13331  }
13332  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
13333 
13334  // 2. Adjust to whole block.
13335  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
13336  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
13337  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
13338  memRange.offset += allocationOffset;
13339  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
13340 
13341  break;
13342  }
13343 
13344  default:
13345  VMA_ASSERT(0);
13346  }
13347 
13348  switch(op)
13349  {
13350  case VMA_CACHE_FLUSH:
13351  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
13352  break;
13353  case VMA_CACHE_INVALIDATE:
13354  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
13355  break;
13356  default:
13357  VMA_ASSERT(0);
13358  }
13359  }
13360  // else: Just ignore this call.
13361 }
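
To make the rounding above concrete: with nonCoherentAtomSize = 64, a dedicated allocation of size 512, offset = 100 and size = 200, memRange.offset becomes VmaAlignDown(100, 64) = 64, and memRange.size becomes VMA_MIN(VmaAlignUp(200 + (100 - 64), 64), 512 - 64) = VMA_MIN(256, 448) = 256; the flushed or invalidated range [64, 320) is therefore a properly aligned superset of the requested [100, 300), as VkMappedMemoryRange requires.
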
13362 
13363 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13364 {
13365  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13366 
13367  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13368  {
13369  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13370  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13371  VMA_ASSERT(pDedicatedAllocations);
13372  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13373  VMA_ASSERT(success);
13374  }
13375 
13376  VkDeviceMemory hMemory = allocation->GetMemory();
13377 
13378  /*
13379  There is no need to call this, because the Vulkan spec allows skipping
13380  vkUnmapMemory before vkFreeMemory.
13381 
13382  if(allocation->GetMappedData() != VMA_NULL)
13383  {
13384  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13385  }
13386  */
13387 
13388  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13389 
13390  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13391 }
13392 
13393 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13394 {
13395  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13396  !hAllocation->CanBecomeLost() &&
13397  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13398  {
13399  void* pData = VMA_NULL;
13400  VkResult res = Map(hAllocation, &pData);
13401  if(res == VK_SUCCESS)
13402  {
13403  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13404  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13405  Unmap(hAllocation);
13406  }
13407  else
13408  {
13409  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13410  }
13411  }
13412 }
13413 
13414 #if VMA_STATS_STRING_ENABLED
13415 
13416 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
13417 {
13418  bool dedicatedAllocationsStarted = false;
13419  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13420  {
13421  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13422  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
13423  VMA_ASSERT(pDedicatedAllocVector);
13424  if(pDedicatedAllocVector->empty() == false)
13425  {
13426  if(dedicatedAllocationsStarted == false)
13427  {
13428  dedicatedAllocationsStarted = true;
13429  json.WriteString("DedicatedAllocations");
13430  json.BeginObject();
13431  }
13432 
13433  json.BeginString("Type ");
13434  json.ContinueString(memTypeIndex);
13435  json.EndString();
13436 
13437  json.BeginArray();
13438 
13439  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
13440  {
13441  json.BeginObject(true);
13442  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
13443  hAlloc->PrintParameters(json);
13444  json.EndObject();
13445  }
13446 
13447  json.EndArray();
13448  }
13449  }
13450  if(dedicatedAllocationsStarted)
13451  {
13452  json.EndObject();
13453  }
13454 
13455  {
13456  bool allocationsStarted = false;
13457  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13458  {
13459  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
13460  {
13461  if(allocationsStarted == false)
13462  {
13463  allocationsStarted = true;
13464  json.WriteString("DefaultPools");
13465  json.BeginObject();
13466  }
13467 
13468  json.BeginString("Type ");
13469  json.ContinueString(memTypeIndex);
13470  json.EndString();
13471 
13472  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
13473  }
13474  }
13475  if(allocationsStarted)
13476  {
13477  json.EndObject();
13478  }
13479  }
13480 
13481  // Custom pools
13482  {
13483  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13484  const size_t poolCount = m_Pools.size();
13485  if(poolCount > 0)
13486  {
13487  json.WriteString("Pools");
13488  json.BeginObject();
13489  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13490  {
13491  json.BeginString();
13492  json.ContinueString(m_Pools[poolIndex]->GetId());
13493  json.EndString();
13494 
13495  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
13496  }
13497  json.EndObject();
13498  }
13499  }
13500 }
13501 
13502 #endif // #if VMA_STATS_STRING_ENABLED
13503 
13504 ////////////////////////////////////////////////////////////////////////////////
13505 // Public interface
13506 
13507 VkResult vmaCreateAllocator(
13508  const VmaAllocatorCreateInfo* pCreateInfo,
13509  VmaAllocator* pAllocator)
13510 {
13511  VMA_ASSERT(pCreateInfo && pAllocator);
13512  VMA_DEBUG_LOG("vmaCreateAllocator");
13513  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13514  return (*pAllocator)->Init(pCreateInfo);
13515 }
13516 
13517 void vmaDestroyAllocator(
13518  VmaAllocator allocator)
13519 {
13520  if(allocator != VK_NULL_HANDLE)
13521  {
13522  VMA_DEBUG_LOG("vmaDestroyAllocator");
13523  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13524  vma_delete(&allocationCallbacks, allocator);
13525  }
13526 }
13527 
13528 void vmaGetPhysicalDeviceProperties(
13529  VmaAllocator allocator,
13530  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13531 {
13532  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13533  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13534 }
13535 
13536 void vmaGetMemoryProperties(
13537  VmaAllocator allocator,
13538  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13539 {
13540  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13541  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13542 }
13543 
13544 void vmaGetMemoryTypeProperties(
13545  VmaAllocator allocator,
13546  uint32_t memoryTypeIndex,
13547  VkMemoryPropertyFlags* pFlags)
13548 {
13549  VMA_ASSERT(allocator && pFlags);
13550  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13551  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13552 }
13553 
13554 void vmaSetCurrentFrameIndex(
13555  VmaAllocator allocator,
13556  uint32_t frameIndex)
13557 {
13558  VMA_ASSERT(allocator);
13559  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13560 
13561  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13562 
13563  allocator->SetCurrentFrameIndex(frameIndex);
13564 }
13565 
13566 void vmaCalculateStats(
13567  VmaAllocator allocator,
13568  VmaStats* pStats)
13569 {
13570  VMA_ASSERT(allocator && pStats);
13571  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13572  allocator->CalculateStats(pStats);
13573 }
13574 
13575 #if VMA_STATS_STRING_ENABLED
13576 
13577 void vmaBuildStatsString(
13578  VmaAllocator allocator,
13579  char** ppStatsString,
13580  VkBool32 detailedMap)
13581 {
13582  VMA_ASSERT(allocator && ppStatsString);
13583  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13584 
13585  VmaStringBuilder sb(allocator);
13586  {
13587  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13588  json.BeginObject();
13589 
13590  VmaStats stats;
13591  allocator->CalculateStats(&stats);
13592 
13593  json.WriteString("Total");
13594  VmaPrintStatInfo(json, stats.total);
13595 
13596  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13597  {
13598  json.BeginString("Heap ");
13599  json.ContinueString(heapIndex);
13600  json.EndString();
13601  json.BeginObject();
13602 
13603  json.WriteString("Size");
13604  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13605 
13606  json.WriteString("Flags");
13607  json.BeginArray(true);
13608  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13609  {
13610  json.WriteString("DEVICE_LOCAL");
13611  }
13612  json.EndArray();
13613 
13614  if(stats.memoryHeap[heapIndex].blockCount > 0)
13615  {
13616  json.WriteString("Stats");
13617  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13618  }
13619 
13620  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13621  {
13622  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13623  {
13624  json.BeginString("Type ");
13625  json.ContinueString(typeIndex);
13626  json.EndString();
13627 
13628  json.BeginObject();
13629 
13630  json.WriteString("Flags");
13631  json.BeginArray(true);
13632  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13633  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13634  {
13635  json.WriteString("DEVICE_LOCAL");
13636  }
13637  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13638  {
13639  json.WriteString("HOST_VISIBLE");
13640  }
13641  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13642  {
13643  json.WriteString("HOST_COHERENT");
13644  }
13645  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13646  {
13647  json.WriteString("HOST_CACHED");
13648  }
13649  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13650  {
13651  json.WriteString("LAZILY_ALLOCATED");
13652  }
13653  json.EndArray();
13654 
13655  if(stats.memoryType[typeIndex].blockCount > 0)
13656  {
13657  json.WriteString("Stats");
13658  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13659  }
13660 
13661  json.EndObject();
13662  }
13663  }
13664 
13665  json.EndObject();
13666  }
13667  if(detailedMap == VK_TRUE)
13668  {
13669  allocator->PrintDetailedMap(json);
13670  }
13671 
13672  json.EndObject();
13673  }
13674 
13675  const size_t len = sb.GetLength();
13676  char* const pChars = vma_new_array(allocator, char, len + 1);
13677  if(len > 0)
13678  {
13679  memcpy(pChars, sb.GetData(), len);
13680  }
13681  pChars[len] = '\0';
13682  *ppStatsString = pChars;
13683 }
13684 
13685 void vmaFreeStatsString(
13686  VmaAllocator allocator,
13687  char* pStatsString)
13688 {
13689  if(pStatsString != VMA_NULL)
13690  {
13691  VMA_ASSERT(allocator);
13692  size_t len = strlen(pStatsString);
13693  vma_delete_array(allocator, pStatsString, len + 1);
13694  }
13695 }
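
A short usage sketch for the pair above, assuming an initialized allocator; printf stands in for whatever logging the application uses (requires <stdio.h>):

// Hedged sketch, not part of the library source:
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE adds the detailed map
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString); // the string must be freed by this call only
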
13696 
13697 #endif // #if VMA_STATS_STRING_ENABLED
13698 
13699 /*
13700 This function is not protected by any mutex because it just reads immutable data.
13701 */
13702 VkResult vmaFindMemoryTypeIndex(
13703  VmaAllocator allocator,
13704  uint32_t memoryTypeBits,
13705  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13706  uint32_t* pMemoryTypeIndex)
13707 {
13708  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13709  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13710  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13711 
13712  if(pAllocationCreateInfo->memoryTypeBits != 0)
13713  {
13714  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13715  }
13716 
13717  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13718  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13719 
13720  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13721  if(mapped)
13722  {
13723  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13724  }
13725 
13726  // Convert usage to requiredFlags and preferredFlags.
13727  switch(pAllocationCreateInfo->usage)
13728  {
13729  case VMA_MEMORY_USAGE_UNKNOWN:
13730  break;
13731  case VMA_MEMORY_USAGE_GPU_ONLY:
13732  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13733  {
13734  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13735  }
13736  break;
13737  case VMA_MEMORY_USAGE_CPU_ONLY:
13738  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13739  break;
13740  case VMA_MEMORY_USAGE_CPU_TO_GPU:
13741  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13742  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13743  {
13744  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13745  }
13746  break;
13747  case VMA_MEMORY_USAGE_GPU_TO_CPU:
13748  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13749  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13750  break;
13751  default:
13752  break;
13753  }
13754 
13755  *pMemoryTypeIndex = UINT32_MAX;
13756  uint32_t minCost = UINT32_MAX;
13757  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13758  memTypeIndex < allocator->GetMemoryTypeCount();
13759  ++memTypeIndex, memTypeBit <<= 1)
13760  {
13761  // This memory type is acceptable according to memoryTypeBits bitmask.
13762  if((memTypeBit & memoryTypeBits) != 0)
13763  {
13764  const VkMemoryPropertyFlags currFlags =
13765  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13766  // This memory type contains requiredFlags.
13767  if((requiredFlags & ~currFlags) == 0)
13768  {
13769  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13770  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13771  // Remember memory type with lowest cost.
13772  if(currCost < minCost)
13773  {
13774  *pMemoryTypeIndex = memTypeIndex;
13775  if(currCost == 0)
13776  {
13777  return VK_SUCCESS;
13778  }
13779  minCost = currCost;
13780  }
13781  }
13782  }
13783  }
13784  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13785 }
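
A typical call to the function above, assuming an initialized allocator; passing UINT32_MAX as memoryTypeBits means any memory type is acceptable:

// Hedged sketch, not part of the library source:
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // host-visible, host-coherent
uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
// On VK_SUCCESS, memTypeIndex can seed e.g. VmaPoolCreateInfo::memoryTypeIndex.
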
13786 
13787 VkResult vmaFindMemoryTypeIndexForBufferInfo(
13788  VmaAllocator allocator,
13789  const VkBufferCreateInfo* pBufferCreateInfo,
13790  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13791  uint32_t* pMemoryTypeIndex)
13792 {
13793  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13794  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13795  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13796  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13797 
13798  const VkDevice hDev = allocator->m_hDevice;
13799  VkBuffer hBuffer = VK_NULL_HANDLE;
13800  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13801  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13802  if(res == VK_SUCCESS)
13803  {
13804  VkMemoryRequirements memReq = {};
13805  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13806  hDev, hBuffer, &memReq);
13807 
13808  res = vmaFindMemoryTypeIndex(
13809  allocator,
13810  memReq.memoryTypeBits,
13811  pAllocationCreateInfo,
13812  pMemoryTypeIndex);
13813 
13814  allocator->GetVulkanFunctions().vkDestroyBuffer(
13815  hDev, hBuffer, allocator->GetAllocationCallbacks());
13816  }
13817  return res;
13818 }
13819 
13820 VkResult vmaFindMemoryTypeIndexForImageInfo(
13821  VmaAllocator allocator,
13822  const VkImageCreateInfo* pImageCreateInfo,
13823  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13824  uint32_t* pMemoryTypeIndex)
13825 {
13826  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13827  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13828  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13829  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13830 
13831  const VkDevice hDev = allocator->m_hDevice;
13832  VkImage hImage = VK_NULL_HANDLE;
13833  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13834  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13835  if(res == VK_SUCCESS)
13836  {
13837  VkMemoryRequirements memReq = {};
13838  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13839  hDev, hImage, &memReq);
13840 
13841  res = vmaFindMemoryTypeIndex(
13842  allocator,
13843  memReq.memoryTypeBits,
13844  pAllocationCreateInfo,
13845  pMemoryTypeIndex);
13846 
13847  allocator->GetVulkanFunctions().vkDestroyImage(
13848  hDev, hImage, allocator->GetAllocationCallbacks());
13849  }
13850  return res;
13851 }
13852 
13853 VkResult vmaCreatePool(
13854  VmaAllocator allocator,
13855  const VmaPoolCreateInfo* pCreateInfo,
13856  VmaPool* pPool)
13857 {
13858  VMA_ASSERT(allocator && pCreateInfo && pPool);
13859 
13860  VMA_DEBUG_LOG("vmaCreatePool");
13861 
13862  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13863 
13864  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13865 
13866 #if VMA_RECORDING_ENABLED
13867  if(allocator->GetRecorder() != VMA_NULL)
13868  {
13869  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13870  }
13871 #endif
13872 
13873  return res;
13874 }
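
A sketch of pool creation built on a memory type index found earlier (an initialized allocator is assumed; memTypeIndex and the sizes are illustrative):

// Hedged sketch, not part of the library source:
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;  // e.g. from vmaFindMemoryTypeIndex
poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per block; 0 means default
poolCreateInfo.maxBlockCount = 2;               // never grow beyond two blocks
VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Allocations target it via VmaAllocationCreateInfo::pool; destroy with vmaDestroyPool.
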
13875 
13876 void vmaDestroyPool(
13877  VmaAllocator allocator,
13878  VmaPool pool)
13879 {
13880  VMA_ASSERT(allocator);
13881 
13882  if(pool == VK_NULL_HANDLE)
13883  {
13884  return;
13885  }
13886 
13887  VMA_DEBUG_LOG("vmaDestroyPool");
13888 
13889  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13890 
13891 #if VMA_RECORDING_ENABLED
13892  if(allocator->GetRecorder() != VMA_NULL)
13893  {
13894  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13895  }
13896 #endif
13897 
13898  allocator->DestroyPool(pool);
13899 }
13900 
13901 void vmaGetPoolStats(
13902  VmaAllocator allocator,
13903  VmaPool pool,
13904  VmaPoolStats* pPoolStats)
13905 {
13906  VMA_ASSERT(allocator && pool && pPoolStats);
13907 
13908  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13909 
13910  allocator->GetPoolStats(pool, pPoolStats);
13911 }
13912 
13913 void vmaMakePoolAllocationsLost(
13914  VmaAllocator allocator,
13915  VmaPool pool,
13916  size_t* pLostAllocationCount)
13917 {
13918  VMA_ASSERT(allocator && pool);
13919 
13920  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13921 
13922 #if VMA_RECORDING_ENABLED
13923  if(allocator->GetRecorder() != VMA_NULL)
13924  {
13925  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13926  }
13927 #endif
13928 
13929  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13930 }
13931 
13932 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13933 {
13934  VMA_ASSERT(allocator && pool);
13935 
13936  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13937 
13938  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13939 
13940  return allocator->CheckPoolCorruption(pool);
13941 }
13942 
13943 VkResult vmaAllocateMemory(
13944  VmaAllocator allocator,
13945  const VkMemoryRequirements* pVkMemoryRequirements,
13946  const VmaAllocationCreateInfo* pCreateInfo,
13947  VmaAllocation* pAllocation,
13948  VmaAllocationInfo* pAllocationInfo)
13949 {
13950  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13951 
13952  VMA_DEBUG_LOG("vmaAllocateMemory");
13953 
13954  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13955 
13956  VkResult result = allocator->AllocateMemory(
13957  *pVkMemoryRequirements,
13958  false, // requiresDedicatedAllocation
13959  false, // prefersDedicatedAllocation
13960  VK_NULL_HANDLE, // dedicatedBuffer
13961  VK_NULL_HANDLE, // dedicatedImage
13962  *pCreateInfo,
13963  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13964  pAllocation);
13965 
13966 #if VMA_RECORDING_ENABLED
13967  if(allocator->GetRecorder() != VMA_NULL)
13968  {
13969  allocator->GetRecorder()->RecordAllocateMemory(
13970  allocator->GetCurrentFrameIndex(),
13971  *pVkMemoryRequirements,
13972  *pCreateInfo,
13973  *pAllocation);
13974  }
13975 #endif
13976 
13977  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13978  {
13979  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13980  }
13981 
13982  return result;
13983 }
13984 
13985 VkResult vmaAllocateMemoryForBuffer(
13986  VmaAllocator allocator,
13987  VkBuffer buffer,
13988  const VmaAllocationCreateInfo* pCreateInfo,
13989  VmaAllocation* pAllocation,
13990  VmaAllocationInfo* pAllocationInfo)
13991 {
13992  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13993 
13994  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
13995 
13996  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13997 
13998  VkMemoryRequirements vkMemReq = {};
13999  bool requiresDedicatedAllocation = false;
14000  bool prefersDedicatedAllocation = false;
14001  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
14002  requiresDedicatedAllocation,
14003  prefersDedicatedAllocation);
14004 
14005  VkResult result = allocator->AllocateMemory(
14006  vkMemReq,
14007  requiresDedicatedAllocation,
14008  prefersDedicatedAllocation,
14009  buffer, // dedicatedBuffer
14010  VK_NULL_HANDLE, // dedicatedImage
14011  *pCreateInfo,
14012  VMA_SUBALLOCATION_TYPE_BUFFER,
14013  pAllocation);
14014 
14015 #if VMA_RECORDING_ENABLED
14016  if(allocator->GetRecorder() != VMA_NULL)
14017  {
14018  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
14019  allocator->GetCurrentFrameIndex(),
14020  vkMemReq,
14021  requiresDedicatedAllocation,
14022  prefersDedicatedAllocation,
14023  *pCreateInfo,
14024  *pAllocation);
14025  }
14026 #endif
14027 
14028  if(pAllocationInfo && result == VK_SUCCESS)
14029  {
14030  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14031  }
14032 
14033  return result;
14034 }
14035 
14036 VkResult vmaAllocateMemoryForImage(
14037  VmaAllocator allocator,
14038  VkImage image,
14039  const VmaAllocationCreateInfo* pCreateInfo,
14040  VmaAllocation* pAllocation,
14041  VmaAllocationInfo* pAllocationInfo)
14042 {
14043  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
14044 
14045  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
14046 
14047  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14048 
14049  VkMemoryRequirements vkMemReq = {};
14050  bool requiresDedicatedAllocation = false;
14051  bool prefersDedicatedAllocation = false;
14052  allocator->GetImageMemoryRequirements(image, vkMemReq,
14053  requiresDedicatedAllocation, prefersDedicatedAllocation);
14054 
14055  VkResult result = allocator->AllocateMemory(
14056  vkMemReq,
14057  requiresDedicatedAllocation,
14058  prefersDedicatedAllocation,
14059  VK_NULL_HANDLE, // dedicatedBuffer
14060  image, // dedicatedImage
14061  *pCreateInfo,
14062  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
14063  pAllocation);
14064 
14065 #if VMA_RECORDING_ENABLED
14066  if(allocator->GetRecorder() != VMA_NULL)
14067  {
14068  allocator->GetRecorder()->RecordAllocateMemoryForImage(
14069  allocator->GetCurrentFrameIndex(),
14070  vkMemReq,
14071  requiresDedicatedAllocation,
14072  prefersDedicatedAllocation,
14073  *pCreateInfo,
14074  *pAllocation);
14075  }
14076 #endif
14077 
14078  if(pAllocationInfo && result == VK_SUCCESS)
14079  {
14080  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14081  }
14082 
14083  return result;
14084 }
14085 
14086 void vmaFreeMemory(
14087  VmaAllocator allocator,
14088  VmaAllocation allocation)
14089 {
14090  VMA_ASSERT(allocator);
14091 
14092  if(allocation == VK_NULL_HANDLE)
14093  {
14094  return;
14095  }
14096 
14097  VMA_DEBUG_LOG("vmaFreeMemory");
14098 
14099  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14100 
14101 #if VMA_RECORDING_ENABLED
14102  if(allocator->GetRecorder() != VMA_NULL)
14103  {
14104  allocator->GetRecorder()->RecordFreeMemory(
14105  allocator->GetCurrentFrameIndex(),
14106  allocation);
14107  }
14108 #endif
14109 
14110  allocator->FreeMemory(allocation);
14111 }
14112 
14113 VkResult vmaResizeAllocation(
14114  VmaAllocator allocator,
14115  VmaAllocation allocation,
14116  VkDeviceSize newSize)
14117 {
14118  VMA_ASSERT(allocator && allocation);
14119 
14120  VMA_DEBUG_LOG("vmaResizeAllocation");
14121 
14122  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14123 
14124 #if VMA_RECORDING_ENABLED
14125  if(allocator->GetRecorder() != VMA_NULL)
14126  {
14127  allocator->GetRecorder()->RecordResizeAllocation(
14128  allocator->GetCurrentFrameIndex(),
14129  allocation,
14130  newSize);
14131  }
14132 #endif
14133 
14134  return allocator->ResizeAllocation(allocation, newSize);
14135 }
14136 
14137 void vmaGetAllocationInfo(
14138  VmaAllocator allocator,
14139  VmaAllocation allocation,
14140  VmaAllocationInfo* pAllocationInfo)
14141 {
14142  VMA_ASSERT(allocator && allocation && pAllocationInfo);
14143 
14144  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14145 
14146 #if VMA_RECORDING_ENABLED
14147  if(allocator->GetRecorder() != VMA_NULL)
14148  {
14149  allocator->GetRecorder()->RecordGetAllocationInfo(
14150  allocator->GetCurrentFrameIndex(),
14151  allocation);
14152  }
14153 #endif
14154 
14155  allocator->GetAllocationInfo(allocation, pAllocationInfo);
14156 }
14157 
14158 VkBool32 vmaTouchAllocation(
14159  VmaAllocator allocator,
14160  VmaAllocation allocation)
14161 {
14162  VMA_ASSERT(allocator && allocation);
14163 
14164  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14165 
14166 #if VMA_RECORDING_ENABLED
14167  if(allocator->GetRecorder() != VMA_NULL)
14168  {
14169  allocator->GetRecorder()->RecordTouchAllocation(
14170  allocator->GetCurrentFrameIndex(),
14171  allocation);
14172  }
14173 #endif
14174 
14175  return allocator->TouchAllocation(allocation);
14176 }
14177 
14178 void vmaSetAllocationUserData(
14179  VmaAllocator allocator,
14180  VmaAllocation allocation,
14181  void* pUserData)
14182 {
14183  VMA_ASSERT(allocator && allocation);
14184 
14185  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14186 
14187  allocation->SetUserData(allocator, pUserData);
14188 
14189 #if VMA_RECORDING_ENABLED
14190  if(allocator->GetRecorder() != VMA_NULL)
14191  {
14192  allocator->GetRecorder()->RecordSetAllocationUserData(
14193  allocator->GetCurrentFrameIndex(),
14194  allocation,
14195  pUserData);
14196  }
14197 #endif
14198 }
14199 
14200 void vmaCreateLostAllocation(
14201  VmaAllocator allocator,
14202  VmaAllocation* pAllocation)
14203 {
14204  VMA_ASSERT(allocator && pAllocation);
14205 
14206  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
14207 
14208  allocator->CreateLostAllocation(pAllocation);
14209 
14210 #if VMA_RECORDING_ENABLED
14211  if(allocator->GetRecorder() != VMA_NULL)
14212  {
14213  allocator->GetRecorder()->RecordCreateLostAllocation(
14214  allocator->GetCurrentFrameIndex(),
14215  *pAllocation);
14216  }
14217 #endif
14218 }
14219 
14220 VkResult vmaMapMemory(
14221  VmaAllocator allocator,
14222  VmaAllocation allocation,
14223  void** ppData)
14224 {
14225  VMA_ASSERT(allocator && allocation && ppData);
14226 
14227  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14228 
14229  VkResult res = allocator->Map(allocation, ppData);
14230 
14231 #if VMA_RECORDING_ENABLED
14232  if(allocator->GetRecorder() != VMA_NULL)
14233  {
14234  allocator->GetRecorder()->RecordMapMemory(
14235  allocator->GetCurrentFrameIndex(),
14236  allocation);
14237  }
14238 #endif
14239 
14240  return res;
14241 }
14242 
14243 void vmaUnmapMemory(
14244  VmaAllocator allocator,
14245  VmaAllocation allocation)
14246 {
14247  VMA_ASSERT(allocator && allocation);
14248 
14249  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14250 
14251 #if VMA_RECORDING_ENABLED
14252  if(allocator->GetRecorder() != VMA_NULL)
14253  {
14254  allocator->GetRecorder()->RecordUnmapMemory(
14255  allocator->GetCurrentFrameIndex(),
14256  allocation);
14257  }
14258 #endif
14259 
14260  allocator->Unmap(allocation);
14261 }
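
A minimal sketch of the short-lived mapping pattern these two entry points enable, assuming alloc lives in HOST_VISIBLE memory (e.g. VMA_MEMORY_USAGE_CPU_ONLY) and myData is some host-side struct; names are illustrative (requires <string.h> for memcpy):

// Hedged sketch, not part of the library source:
void* mappedData = VMA_NULL;
if(vmaMapMemory(allocator, alloc, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, &myData, sizeof(myData));
    vmaUnmapMemory(allocator, alloc); // every Map must be balanced by an Unmap
}
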
14262 
14263 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14264 {
14265  VMA_ASSERT(allocator && allocation);
14266 
14267  VMA_DEBUG_LOG("vmaFlushAllocation");
14268 
14269  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14270 
14271  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
14272 
14273 #if VMA_RECORDING_ENABLED
14274  if(allocator->GetRecorder() != VMA_NULL)
14275  {
14276  allocator->GetRecorder()->RecordFlushAllocation(
14277  allocator->GetCurrentFrameIndex(),
14278  allocation, offset, size);
14279  }
14280 #endif
14281 }
14282 
14283 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14284 {
14285  VMA_ASSERT(allocator && allocation);
14286 
14287  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14288 
14289  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14290 
14291  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14292 
14293 #if VMA_RECORDING_ENABLED
14294  if(allocator->GetRecorder() != VMA_NULL)
14295  {
14296  allocator->GetRecorder()->RecordInvalidateAllocation(
14297  allocator->GetCurrentFrameIndex(),
14298  allocation, offset, size);
14299  }
14300 #endif
14301 }
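
On memory types that lack VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, mapped writes must be followed by a flush and mapped reads preceded by an invalidate; on coherent types FlushOrInvalidateAllocation turns both calls into no-ops. A sketch assuming a persistently mapped, non-coherent allocation, reusing the illustrative mappedData/myData names from the mapping sketch above:

// Hedged sketch, not part of the library source:
memcpy(mappedData, &myData, sizeof(myData));
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE); // make the write visible to the GPU
// ...after the GPU has written into the allocation and that work has completed:
vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
memcpy(&myData, mappedData, sizeof(myData)); // now safe to read on the CPU
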
14302 
14303 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14304 {
14305  VMA_ASSERT(allocator);
14306 
14307  VMA_DEBUG_LOG("vmaCheckCorruption");
14308 
14309  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14310 
14311  return allocator->CheckCorruption(memoryTypeBits);
14312 }
14313 
14314 VkResult vmaDefragment(
14315  VmaAllocator allocator,
14316  VmaAllocation* pAllocations,
14317  size_t allocationCount,
14318  VkBool32* pAllocationsChanged,
14319  const VmaDefragmentationInfo *pDefragmentationInfo,
14320  VmaDefragmentationStats* pDefragmentationStats)
14321 {
14322  VMA_ASSERT(allocator && pAllocations);
14323 
14324  VMA_DEBUG_LOG("vmaDefragment");
14325 
14326  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14327 
14328  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14329 }
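
Calling the function above is only half of the job: the caller must recreate and re-bind any buffer whose allocation was moved. A rough sketch, assuming a std::vector<VmaAllocation> named allocations (requires <vector>; names are illustrative):

// Hedged sketch, not part of the library source:
std::vector<VkBool32> changed(allocations.size());
VmaDefragmentationStats defragStats = {};
VkResult res = vmaDefragment(allocator, allocations.data(), allocations.size(),
    changed.data(), VMA_NULL, &defragStats); // VMA_NULL: default move limits
// For each i with changed[i] == VK_TRUE: destroy the old VkBuffer, create a new
// one, and attach it with vmaBindBufferMemory(allocator, allocations[i], newBuf).
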
14330 
14331 VkResult vmaBindBufferMemory(
14332  VmaAllocator allocator,
14333  VmaAllocation allocation,
14334  VkBuffer buffer)
14335 {
14336  VMA_ASSERT(allocator && allocation && buffer);
14337 
14338  VMA_DEBUG_LOG("vmaBindBufferMemory");
14339 
14340  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14341 
14342  return allocator->BindBufferMemory(allocation, buffer);
14343 }
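
The allocate-then-bind pair serves buffers created outside of vmaCreateBuffer. A sketch, assuming buf was created directly with vkCreateBuffer (names are illustrative):

// Hedged sketch, not part of the library source:
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VmaAllocation alloc;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, VMA_NULL);
if(res == VK_SUCCESS)
{
    res = vmaBindBufferMemory(allocator, alloc, buf); // instead of raw vkBindBufferMemory
}
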
14344 
14345 VkResult vmaBindImageMemory(
14346  VmaAllocator allocator,
14347  VmaAllocation allocation,
14348  VkImage image)
14349 {
14350  VMA_ASSERT(allocator && allocation && image);
14351 
14352  VMA_DEBUG_LOG("vmaBindImageMemory");
14353 
14354  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14355 
14356  return allocator->BindImageMemory(allocation, image);
14357 }
14358 
14359 VkResult vmaCreateBuffer(
14360  VmaAllocator allocator,
14361  const VkBufferCreateInfo* pBufferCreateInfo,
14362  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14363  VkBuffer* pBuffer,
14364  VmaAllocation* pAllocation,
14365  VmaAllocationInfo* pAllocationInfo)
14366 {
14367  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14368 
14369  if(pBufferCreateInfo->size == 0)
14370  {
14371  return VK_ERROR_VALIDATION_FAILED_EXT;
14372  }
14373 
14374  VMA_DEBUG_LOG("vmaCreateBuffer");
14375 
14376  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14377 
14378  *pBuffer = VK_NULL_HANDLE;
14379  *pAllocation = VK_NULL_HANDLE;
14380 
14381  // 1. Create VkBuffer.
14382  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14383  allocator->m_hDevice,
14384  pBufferCreateInfo,
14385  allocator->GetAllocationCallbacks(),
14386  pBuffer);
14387  if(res >= 0)
14388  {
14389  // 2. vkGetBufferMemoryRequirements.
14390  VkMemoryRequirements vkMemReq = {};
14391  bool requiresDedicatedAllocation = false;
14392  bool prefersDedicatedAllocation = false;
14393  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14394  requiresDedicatedAllocation, prefersDedicatedAllocation);
14395 
14396  // Make sure the alignment requirements for specific buffer usages reported
14397  // in Physical Device Properties are included in the alignment reported by memory requirements.
14398  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14399  {
14400  VMA_ASSERT(vkMemReq.alignment %
14401  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14402  }
14403  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14404  {
14405  VMA_ASSERT(vkMemReq.alignment %
14406  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14407  }
14408  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14409  {
14410  VMA_ASSERT(vkMemReq.alignment %
14411  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14412  }
14413 
14414  // 3. Allocate memory using allocator.
14415  res = allocator->AllocateMemory(
14416  vkMemReq,
14417  requiresDedicatedAllocation,
14418  prefersDedicatedAllocation,
14419  *pBuffer, // dedicatedBuffer
14420  VK_NULL_HANDLE, // dedicatedImage
14421  *pAllocationCreateInfo,
14422  VMA_SUBALLOCATION_TYPE_BUFFER,
14423  pAllocation);
14424 
14425 #if VMA_RECORDING_ENABLED
14426  if(allocator->GetRecorder() != VMA_NULL)
14427  {
14428  allocator->GetRecorder()->RecordCreateBuffer(
14429  allocator->GetCurrentFrameIndex(),
14430  *pBufferCreateInfo,
14431  *pAllocationCreateInfo,
14432  *pAllocation);
14433  }
14434 #endif
14435 
14436  if(res >= 0)
14437  {
14438  // 4. Bind buffer with memory.
14439  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14440  if(res >= 0)
14441  {
14442  // All steps succeeded.
14443  #if VMA_STATS_STRING_ENABLED
14444  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14445  #endif
14446  if(pAllocationInfo != VMA_NULL)
14447  {
14448  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14449  }
14450 
14451  return VK_SUCCESS;
14452  }
14453  allocator->FreeMemory(*pAllocation);
14454  *pAllocation = VK_NULL_HANDLE;
14455  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14456  *pBuffer = VK_NULL_HANDLE;
14457  return res;
14458  }
14459  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14460  *pBuffer = VK_NULL_HANDLE;
14461  return res;
14462  }
14463  return res;
14464 }
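
The function above wires the whole create-allocate-bind sequence together. A typical call for a device-local vertex buffer (an initialized allocator is assumed; the size is illustrative):

// Hedged sketch, not part of the library source:
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
// Later: vmaDestroyBuffer(allocator, buf, alloc);
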
14465 
14466 void vmaDestroyBuffer(
14467  VmaAllocator allocator,
14468  VkBuffer buffer,
14469  VmaAllocation allocation)
14470 {
14471  VMA_ASSERT(allocator);
14472 
14473  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14474  {
14475  return;
14476  }
14477 
14478  VMA_DEBUG_LOG("vmaDestroyBuffer");
14479 
14480  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14481 
14482 #if VMA_RECORDING_ENABLED
14483  if(allocator->GetRecorder() != VMA_NULL)
14484  {
14485  allocator->GetRecorder()->RecordDestroyBuffer(
14486  allocator->GetCurrentFrameIndex(),
14487  allocation);
14488  }
14489 #endif
14490 
14491  if(buffer != VK_NULL_HANDLE)
14492  {
14493  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14494  }
14495 
14496  if(allocation != VK_NULL_HANDLE)
14497  {
14498  allocator->FreeMemory(allocation);
14499  }
14500 }
14501 
14502 VkResult vmaCreateImage(
14503  VmaAllocator allocator,
14504  const VkImageCreateInfo* pImageCreateInfo,
14505  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14506  VkImage* pImage,
14507  VmaAllocation* pAllocation,
14508  VmaAllocationInfo* pAllocationInfo)
14509 {
14510  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14511 
14512  if(pImageCreateInfo->extent.width == 0 ||
14513  pImageCreateInfo->extent.height == 0 ||
14514  pImageCreateInfo->extent.depth == 0 ||
14515  pImageCreateInfo->mipLevels == 0 ||
14516  pImageCreateInfo->arrayLayers == 0)
14517  {
14518  return VK_ERROR_VALIDATION_FAILED_EXT;
14519  }
14520 
14521  VMA_DEBUG_LOG("vmaCreateImage");
14522 
14523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14524 
14525  *pImage = VK_NULL_HANDLE;
14526  *pAllocation = VK_NULL_HANDLE;
14527 
14528  // 1. Create VkImage.
14529  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14530  allocator->m_hDevice,
14531  pImageCreateInfo,
14532  allocator->GetAllocationCallbacks(),
14533  pImage);
14534  if(res >= 0)
14535  {
14536  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14537  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14538  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14539 
14540  // 2. Allocate memory using allocator.
14541  VkMemoryRequirements vkMemReq = {};
14542  bool requiresDedicatedAllocation = false;
14543  bool prefersDedicatedAllocation = false;
14544  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14545  requiresDedicatedAllocation, prefersDedicatedAllocation);
14546 
14547  res = allocator->AllocateMemory(
14548  vkMemReq,
14549  requiresDedicatedAllocation,
14550  prefersDedicatedAllocation,
14551  VK_NULL_HANDLE, // dedicatedBuffer
14552  *pImage, // dedicatedImage
14553  *pAllocationCreateInfo,
14554  suballocType,
14555  pAllocation);
14556 
14557 #if VMA_RECORDING_ENABLED
14558  if(allocator->GetRecorder() != VMA_NULL)
14559  {
14560  allocator->GetRecorder()->RecordCreateImage(
14561  allocator->GetCurrentFrameIndex(),
14562  *pImageCreateInfo,
14563  *pAllocationCreateInfo,
14564  *pAllocation);
14565  }
14566 #endif
14567 
14568  if(res >= 0)
14569  {
14570  // 3. Bind image with memory.
14571  res = allocator->BindImageMemory(*pAllocation, *pImage);
14572  if(res >= 0)
14573  {
14574  // All steps succeeded.
14575  #if VMA_STATS_STRING_ENABLED
14576  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14577  #endif
14578  if(pAllocationInfo != VMA_NULL)
14579  {
14580  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14581  }
14582 
14583  return VK_SUCCESS;
14584  }
14585  allocator->FreeMemory(*pAllocation);
14586  *pAllocation = VK_NULL_HANDLE;
14587  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14588  *pImage = VK_NULL_HANDLE;
14589  return res;
14590  }
14591  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14592  *pImage = VK_NULL_HANDLE;
14593  return res;
14594  }
14595  return res;
14596 }
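
The image path mirrors vmaCreateBuffer. A sketch for a small sampled texture (illustrative parameters; the zero-initialized fields fall back to VK_SHARING_MODE_EXCLUSIVE and VK_IMAGE_LAYOUT_UNDEFINED):

// Hedged sketch, not part of the library source:
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.extent = { 256, 256, 1 };
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VkImage image;
VmaAllocation alloc;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &image, &alloc, VMA_NULL);
// Later: vmaDestroyImage(allocator, image, alloc);
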
14597 
14598 void vmaDestroyImage(
14599  VmaAllocator allocator,
14600  VkImage image,
14601  VmaAllocation allocation)
14602 {
14603  VMA_ASSERT(allocator);
14604 
14605  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14606  {
14607  return;
14608  }
14609 
14610  VMA_DEBUG_LOG("vmaDestroyImage");
14611 
14612  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14613 
14614 #if VMA_RECORDING_ENABLED
14615  if(allocator->GetRecorder() != VMA_NULL)
14616  {
14617  allocator->GetRecorder()->RecordDestroyImage(
14618  allocator->GetCurrentFrameIndex(),
14619  allocation);
14620  }
14621 #endif
14622 
14623  if(image != VK_NULL_HANDLE)
14624  {
14625  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14626  }
14627  if(allocation != VK_NULL_HANDLE)
14628  {
14629  allocator->FreeMemory(allocation);
14630  }
14631 }
14632 
14633 #endif // #ifdef VMA_IMPLEMENTATION
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1479 /*
1480 Define this macro to 0/1 to disable/enable support for recording functionality,
1481 available through VmaAllocatorCreateInfo::pRecordSettings.
1482 */
1483 #ifndef VMA_RECORDING_ENABLED
1484  #ifdef _WIN32
1485  #define VMA_RECORDING_ENABLED 1
1486  #else
1487  #define VMA_RECORDING_ENABLED 0
1488  #endif
1489 #endif
1490 
1491 #ifndef NOMINMAX
1492  #define NOMINMAX // For windows.h
1493 #endif
1494 
1495 #include <vulkan/vulkan.h>
1496 
1497 #if VMA_RECORDING_ENABLED
1498  #include <windows.h>
1499 #endif
1500 
1501 #if !defined(VMA_DEDICATED_ALLOCATION)
1502  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1503  #define VMA_DEDICATED_ALLOCATION 1
1504  #else
1505  #define VMA_DEDICATED_ALLOCATION 0
1506  #endif
1507 #endif
1508 
1518 VK_DEFINE_HANDLE(VmaAllocator)
1519 
1520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1522  VmaAllocator allocator,
1523  uint32_t memoryType,
1524  VkDeviceMemory memory,
1525  VkDeviceSize size);
1527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1528  VmaAllocator allocator,
1529  uint32_t memoryType,
1530  VkDeviceMemory memory,
1531  VkDeviceSize size);
1532 
1546 
1576 
1579 typedef VkFlags VmaAllocatorCreateFlags;
1580 
1585 typedef struct VmaVulkanFunctions {
1586  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1587  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1588  PFN_vkAllocateMemory vkAllocateMemory;
1589  PFN_vkFreeMemory vkFreeMemory;
1590  PFN_vkMapMemory vkMapMemory;
1591  PFN_vkUnmapMemory vkUnmapMemory;
1592  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1593  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1594  PFN_vkBindBufferMemory vkBindBufferMemory;
1595  PFN_vkBindImageMemory vkBindImageMemory;
1596  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1597  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1598  PFN_vkCreateBuffer vkCreateBuffer;
1599  PFN_vkDestroyBuffer vkDestroyBuffer;
1600  PFN_vkCreateImage vkCreateImage;
1601  PFN_vkDestroyImage vkDestroyImage;
1602 #if VMA_DEDICATED_ALLOCATION
1603  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1604  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1605 #endif
1606 } VmaVulkanFunctions;
1607 
1609 typedef enum VmaRecordFlagBits {
1615 } VmaRecordFlagBits;
1616 
1619 typedef VkFlags VmaRecordFlags;
1620 
1622 typedef struct VmaRecordSettings
1623 {
1633  const char* pFilePath;
1634 } VmaRecordSettings;
1635 
1637 typedef struct VmaAllocatorCreateInfo
1638 {
1642 
1643  VkPhysicalDevice physicalDevice;
1645 
1646  VkDevice device;
1648 
1651 
1652  const VkAllocationCallbacks* pAllocationCallbacks;
1654 
1693  const VkDeviceSize* pHeapSizeLimit;
1714 
1716 VkResult vmaCreateAllocator(
1717  const VmaAllocatorCreateInfo* pCreateInfo,
1718  VmaAllocator* pAllocator);
1719 
1721 void vmaDestroyAllocator(
1722  VmaAllocator allocator);
1723 
1728 void vmaGetPhysicalDeviceProperties(
1729  VmaAllocator allocator,
1730  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1731 
1736 void vmaGetMemoryProperties(
1737  VmaAllocator allocator,
1738  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1739 
1746 void vmaGetMemoryTypeProperties(
1747  VmaAllocator allocator,
1748  uint32_t memoryTypeIndex,
1749  VkMemoryPropertyFlags* pFlags);
1750 
1759 void vmaSetCurrentFrameIndex(
1760  VmaAllocator allocator,
1761  uint32_t frameIndex);
1762 
1765 typedef struct VmaStatInfo
1766 {
1768  uint32_t blockCount;
1774  VkDeviceSize usedBytes;
1776  VkDeviceSize unusedBytes;
1779 } VmaStatInfo;
1780 
1782 typedef struct VmaStats
1783 {
1784  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1785  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1787 } VmaStats;
1788 
1790 void vmaCalculateStats(
1791  VmaAllocator allocator,
1792  VmaStats* pStats);
1793 
1794 #define VMA_STATS_STRING_ENABLED 1
1795 
1796 #if VMA_STATS_STRING_ENABLED
1797 
1799 
1801 void vmaBuildStatsString(
1802  VmaAllocator allocator,
1803  char** ppStatsString,
1804  VkBool32 detailedMap);
1805 
1806 void vmaFreeStatsString(
1807  VmaAllocator allocator,
1808  char* pStatsString);
1809 
1810 #endif // #if VMA_STATS_STRING_ENABLED
1811 
1820 VK_DEFINE_HANDLE(VmaPool)
1821 
1822 typedef enum VmaMemoryUsage
1823 {
1872 } VmaMemoryUsage;
1873 
1888 
1943 
1956 
1966 
1973 
1977 
1978 typedef struct VmaAllocationCreateInfo
1979 {
1992  VkMemoryPropertyFlags requiredFlags;
1997  VkMemoryPropertyFlags preferredFlags;
2005  uint32_t memoryTypeBits;
2018  void* pUserData;
2019 } VmaAllocationCreateInfo;
2020 
2037 VkResult vmaFindMemoryTypeIndex(
2038  VmaAllocator allocator,
2039  uint32_t memoryTypeBits,
2040  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2041  uint32_t* pMemoryTypeIndex);
2042 
2055 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2056  VmaAllocator allocator,
2057  const VkBufferCreateInfo* pBufferCreateInfo,
2058  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2059  uint32_t* pMemoryTypeIndex);
2060 
2073 VkResult vmaFindMemoryTypeIndexForImageInfo(
2074  VmaAllocator allocator,
2075  const VkImageCreateInfo* pImageCreateInfo,
2076  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2077  uint32_t* pMemoryTypeIndex);
2078 
2099 
2116 
2127 
2133 
2136 typedef VkFlags VmaPoolCreateFlags;
2137 
2140 typedef struct VmaPoolCreateInfo {
2155  VkDeviceSize blockSize;
2183 } VmaPoolCreateInfo;
2184 
2187 typedef struct VmaPoolStats {
2190  VkDeviceSize size;
2193  VkDeviceSize unusedSize;
2206  VkDeviceSize unusedRangeSizeMax;
2209  size_t blockCount;
2210 } VmaPoolStats;
2211 
2218 VkResult vmaCreatePool(
2219  VmaAllocator allocator,
2220  const VmaPoolCreateInfo* pCreateInfo,
2221  VmaPool* pPool);
2222 
2225 void vmaDestroyPool(
2226  VmaAllocator allocator,
2227  VmaPool pool);
2228 
2235 void vmaGetPoolStats(
2236  VmaAllocator allocator,
2237  VmaPool pool,
2238  VmaPoolStats* pPoolStats);
2239 
2246 void vmaMakePoolAllocationsLost(
2247  VmaAllocator allocator,
2248  VmaPool pool,
2249  size_t* pLostAllocationCount);
2250 
2265 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2266 
2291 VK_DEFINE_HANDLE(VmaAllocation)
2292 
2293 
2295 typedef struct VmaAllocationInfo {
2300  uint32_t memoryType;
2309  VkDeviceMemory deviceMemory;
2314  VkDeviceSize offset;
2319  VkDeviceSize size;
2333  void* pUserData;
2334 } VmaAllocationInfo;
2335 
2346 VkResult vmaAllocateMemory(
2347  VmaAllocator allocator,
2348  const VkMemoryRequirements* pVkMemoryRequirements,
2349  const VmaAllocationCreateInfo* pCreateInfo,
2350  VmaAllocation* pAllocation,
2351  VmaAllocationInfo* pAllocationInfo);
2352 
2359 VkResult vmaAllocateMemoryForBuffer(
2360  VmaAllocator allocator,
2361  VkBuffer buffer,
2362  const VmaAllocationCreateInfo* pCreateInfo,
2363  VmaAllocation* pAllocation,
2364  VmaAllocationInfo* pAllocationInfo);
2365 
2367 VkResult vmaAllocateMemoryForImage(
2368  VmaAllocator allocator,
2369  VkImage image,
2370  const VmaAllocationCreateInfo* pCreateInfo,
2371  VmaAllocation* pAllocation,
2372  VmaAllocationInfo* pAllocationInfo);
2373 
2375 void vmaFreeMemory(
2376  VmaAllocator allocator,
2377  VmaAllocation allocation);
2378 
2399 VkResult vmaResizeAllocation(
2400  VmaAllocator allocator,
2401  VmaAllocation allocation,
2402  VkDeviceSize newSize);
2403 
2420 void vmaGetAllocationInfo(
2421  VmaAllocator allocator,
2422  VmaAllocation allocation,
2423  VmaAllocationInfo* pAllocationInfo);
2424 
2439 VkBool32 vmaTouchAllocation(
2440  VmaAllocator allocator,
2441  VmaAllocation allocation);
2442 
2456 void vmaSetAllocationUserData(
2457  VmaAllocator allocator,
2458  VmaAllocation allocation,
2459  void* pUserData);
2460 
2471 void vmaCreateLostAllocation(
2472  VmaAllocator allocator,
2473  VmaAllocation* pAllocation);
2474 
2509 VkResult vmaMapMemory(
2510  VmaAllocator allocator,
2511  VmaAllocation allocation,
2512  void** ppData);
2513 
2518 void vmaUnmapMemory(
2519  VmaAllocator allocator,
2520  VmaAllocation allocation);
2521 
2534 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2535 
2548 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2549 
2566 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2567 
2569 typedef struct VmaDefragmentationInfo {
2574  VkDeviceSize maxBytesToMove;
2580 } VmaDefragmentationInfo;
2581 
2583 typedef struct VmaDefragmentationStats {
2585  VkDeviceSize bytesMoved;
2587  VkDeviceSize bytesFreed;
2592 } VmaDefragmentationStats;
2593 
2632 VkResult vmaDefragment(
2633  VmaAllocator allocator,
2634  VmaAllocation* pAllocations,
2635  size_t allocationCount,
2636  VkBool32* pAllocationsChanged,
2637  const VmaDefragmentationInfo *pDefragmentationInfo,
2638  VmaDefragmentationStats* pDefragmentationStats);
2639 
2652 VkResult vmaBindBufferMemory(
2653  VmaAllocator allocator,
2654  VmaAllocation allocation,
2655  VkBuffer buffer);
2656 
2669 VkResult vmaBindImageMemory(
2670  VmaAllocator allocator,
2671  VmaAllocation allocation,
2672  VkImage image);
2673 
2700 VkResult vmaCreateBuffer(
2701  VmaAllocator allocator,
2702  const VkBufferCreateInfo* pBufferCreateInfo,
2703  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2704  VkBuffer* pBuffer,
2705  VmaAllocation* pAllocation,
2706  VmaAllocationInfo* pAllocationInfo);
2707 
2719 void vmaDestroyBuffer(
2720  VmaAllocator allocator,
2721  VkBuffer buffer,
2722  VmaAllocation allocation);
2723 
2725 VkResult vmaCreateImage(
2726  VmaAllocator allocator,
2727  const VkImageCreateInfo* pImageCreateInfo,
2728  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2729  VkImage* pImage,
2730  VmaAllocation* pAllocation,
2731  VmaAllocationInfo* pAllocationInfo);
2732 
2744 void vmaDestroyImage(
2745  VmaAllocator allocator,
2746  VkImage image,
2747  VmaAllocation allocation);
2748 
2749 #ifdef __cplusplus
2750 }
2751 #endif
2752 
2753 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2754 
2755 // For Visual Studio IntelliSense.
2756 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2757 #define VMA_IMPLEMENTATION
2758 #endif
2759 
2760 #ifdef VMA_IMPLEMENTATION
2761 #undef VMA_IMPLEMENTATION
2762 
2763 #include <cstdint>
2764 #include <cstdlib>
2765 #include <cstring>
2766 
2767 /*******************************************************************************
2768 CONFIGURATION SECTION
2769 
2770 Define some of these macros before each #include of this header or change them
2771 here if you need behavior other than the default, depending on your environment.
2772 */
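// A minimal sketch of overriding the configuration (the macro names below are
// defined later in this section; the chosen values are only an example):
//
//   #define VMA_DEBUG_MARGIN 16
//   #define VMA_DEBUG_DETECT_CORRUPTION 1
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"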
2773 
2774 /*
2775 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2776 internally, like:
2777 
2778  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2779 
2780 Define to 0 if you are going to provide your own pointers to Vulkan functions via
2781 VmaAllocatorCreateInfo::pVulkanFunctions.
2782 */
2783 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2784 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2785 #endif
2786 
2787 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2788 //#define VMA_USE_STL_CONTAINERS 1
2789 
2790 /* Set this macro to 1 to make the library include and use STL containers:
2791 std::pair, std::vector, std::list, std::unordered_map.
2792 
2793 Set it to 0 or leave it undefined to make the library use its own
2794 implementation of these containers.
2795 */
2796 #if VMA_USE_STL_CONTAINERS
2797  #define VMA_USE_STL_VECTOR 1
2798  #define VMA_USE_STL_UNORDERED_MAP 1
2799  #define VMA_USE_STL_LIST 1
2800 #endif
2801 
2802 #if VMA_USE_STL_VECTOR
2803  #include <vector>
2804 #endif
2805 
2806 #if VMA_USE_STL_UNORDERED_MAP
2807  #include <unordered_map>
2808 #endif
2809 
2810 #if VMA_USE_STL_LIST
2811  #include <list>
2812 #endif
2813 
2814 /*
2815 The following headers are used in this CONFIGURATION section only, so feel free
2816 to remove them if not needed.
2817 */
2818 #include <cassert> // for assert
2819 #include <algorithm> // for min, max
2820 #include <mutex> // for std::mutex
2821 #include <atomic> // for std::atomic
2822 
2823 #ifndef VMA_NULL
2824  // Value used as a null pointer. Define it to e.g. nullptr, NULL, 0, or (void*)0.
2825  #define VMA_NULL nullptr
2826 #endif
2827 
2828 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
2829 #include <cstdlib>
2830 void *aligned_alloc(size_t alignment, size_t size)
2831 {
2832  // alignment must be >= sizeof(void*)
2833  if(alignment < sizeof(void*))
2834  {
2835  alignment = sizeof(void*);
2836  }
2837 
2838  return memalign(alignment, size);
2839 }
2840 #elif defined(__APPLE__) || defined(__ANDROID__)
2841 #include <cstdlib>
2842 void *aligned_alloc(size_t alignment, size_t size)
2843 {
2844  // alignment must be >= sizeof(void*)
2845  if(alignment < sizeof(void*))
2846  {
2847  alignment = sizeof(void*);
2848  }
2849 
2850  void *pointer;
2851  if(posix_memalign(&pointer, alignment, size) == 0)
2852  return pointer;
2853  return VMA_NULL;
2854 }
2855 #endif
2856 
2857 // If your compiler is not compatible with C++11 and the definition of the
2858 // aligned_alloc() function is missing, uncommenting the following line may help:
2859 
2860 //#include <malloc.h>
2861 
2862 // Normal assert to check for programmer's errors, especially in Debug configuration.
2863 #ifndef VMA_ASSERT
2864  #ifdef _DEBUG
2865  #define VMA_ASSERT(expr) assert(expr)
2866  #else
2867  #define VMA_ASSERT(expr)
2868  #endif
2869 #endif
2870 
2871 // Assert that will be called very often, e.g. inside data structures such as operator[].
2872 // Making it non-empty can make the program slow.
2873 #ifndef VMA_HEAVY_ASSERT
2874  #ifdef _DEBUG
2875  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2876  #else
2877  #define VMA_HEAVY_ASSERT(expr)
2878  #endif
2879 #endif
2880 
2881 #ifndef VMA_ALIGN_OF
2882  #define VMA_ALIGN_OF(type) (__alignof(type))
2883 #endif
2884 
2885 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2886  #if defined(_WIN32)
2887  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2888  #else
2889  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2890  #endif
2891 #endif
2892 
2893 #ifndef VMA_SYSTEM_FREE
2894  #if defined(_WIN32)
2895  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2896  #else
2897  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2898  #endif
2899 #endif
2900 
2901 #ifndef VMA_MIN
2902  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2903 #endif
2904 
2905 #ifndef VMA_MAX
2906  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2907 #endif
2908 
2909 #ifndef VMA_SWAP
2910  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2911 #endif
2912 
2913 #ifndef VMA_SORT
2914  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2915 #endif
2916 
2917 #ifndef VMA_DEBUG_LOG
2918  #define VMA_DEBUG_LOG(format, ...)
2919  /*
2920  #define VMA_DEBUG_LOG(format, ...) do { \
2921  printf(format, __VA_ARGS__); \
2922  printf("\n"); \
2923  } while(false)
2924  */
2925 #endif
2926 
2927 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2928 #if VMA_STATS_STRING_ENABLED
2929  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2930  {
2931  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2932  }
2933  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2934  {
2935  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2936  }
2937  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2938  {
2939  snprintf(outStr, strLen, "%p", ptr);
2940  }
2941 #endif
2942 
2943 #ifndef VMA_MUTEX
2944  class VmaMutex
2945  {
2946  public:
2947  VmaMutex() { }
2948  ~VmaMutex() { }
2949  void Lock() { m_Mutex.lock(); }
2950  void Unlock() { m_Mutex.unlock(); }
2951  private:
2952  std::mutex m_Mutex;
2953  };
2954  #define VMA_MUTEX VmaMutex
2955 #endif
2956 
2957 /*
2958 If providing your own implementation, you need to implement a subset of std::atomic:
2959 
2960 - Constructor(uint32_t desired)
2961 - uint32_t load() const
2962 - void store(uint32_t desired)
2963 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2964 */
2965 #ifndef VMA_ATOMIC_UINT32
2966  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2967 #endif
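// Hypothetical sketch of a user-provided replacement implementing the subset
// listed above (this wrapper is not part of the library):
//
//   class MyAtomicUint32
//   {
//   public:
//       MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
//       uint32_t load() const { return m_Value.load(); }
//       void store(uint32_t desired) { m_Value.store(desired); }
//       bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
//       {
//           return m_Value.compare_exchange_weak(expected, desired);
//       }
//   private:
//       std::atomic<uint32_t> m_Value;
//   };
//   #define VMA_ATOMIC_UINT32 MyAtomicUint32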
2968 
2969 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2970 
2974  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2975 #endif
2976 
2977 #ifndef VMA_DEBUG_ALIGNMENT
2978 
2982  #define VMA_DEBUG_ALIGNMENT (1)
2983 #endif
2984 
2985 #ifndef VMA_DEBUG_MARGIN
2986 
2990  #define VMA_DEBUG_MARGIN (0)
2991 #endif
2992 
2993 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2994 
2998  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2999 #endif
3000 
3001 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3002 
3007  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3008 #endif
3009 
3010 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3011 
3015  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3016 #endif
3017 
3018 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3019 
3023  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3024 #endif
3025 
3026 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3027  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3029 #endif
3030 
3031 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3032  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3034 #endif
3035 
3036 #ifndef VMA_CLASS_NO_COPY
3037  #define VMA_CLASS_NO_COPY(className) \
3038  private: \
3039  className(const className&) = delete; \
3040  className& operator=(const className&) = delete;
3041 #endif
3042 
3043 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3044 
3045 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3046 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3047 
3048 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3049 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3050 
3051 /*******************************************************************************
3052 END OF CONFIGURATION
3053 */
3054 
3055 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3056  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3057 
3058 // Returns number of bits set to 1 in (v).
3059 static inline uint32_t VmaCountBitsSet(uint32_t v)
3060 {
3061  uint32_t c = v - ((v >> 1) & 0x55555555);
3062  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3063  c = ((c >> 4) + c) & 0x0F0F0F0F;
3064  c = ((c >> 8) + c) & 0x00FF00FF;
3065  c = ((c >> 16) + c) & 0x0000FFFF;
3066  return c;
3067 }
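// Example: VmaCountBitsSet(0x0000000B) == 3 (binary 1011). This is the classic
// SWAR popcount: each step sums the bit counts of adjacent groups (pairs, then
// nibbles, bytes, and half-words) in parallel.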
3068 
3069 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3070 // Use types like uint32_t, uint64_t as T.
3071 template <typename T>
3072 static inline T VmaAlignUp(T val, T align)
3073 {
3074  return (val + align - 1) / align * align;
3075 }
3076 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3077 // Use types like uint32_t, uint64_t as T.
3078 template <typename T>
3079 static inline T VmaAlignDown(T val, T align)
3080 {
3081  return val / align * align;
3082 }
3083 
3084 // Division with mathematical rounding to the nearest integer.
3085 template <typename T>
3086 static inline T VmaRoundDiv(T x, T y)
3087 {
3088  return (x + (y / (T)2)) / y;
3089 }
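// Example: VmaRoundDiv(7, 2) == 4 and VmaRoundDiv(5, 2) == 3, because half of
// the divisor is added before the truncating division.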
3090 
3091 /*
3092 Returns true if given number is a power of two.
3093 T must be an unsigned integer, or a signed integer that is always nonnegative.
3094 Returns true for 0.
3095 */
3096 template <typename T>
3097 inline bool VmaIsPow2(T x)
3098 {
3099  return (x & (x-1)) == 0;
3100 }
3101 
3102 // Returns the smallest power of 2 greater than or equal to v.
3103 static inline uint32_t VmaNextPow2(uint32_t v)
3104 {
3105  v--;
3106  v |= v >> 1;
3107  v |= v >> 2;
3108  v |= v >> 4;
3109  v |= v >> 8;
3110  v |= v >> 16;
3111  v++;
3112  return v;
3113 }
3114 static inline uint64_t VmaNextPow2(uint64_t v)
3115 {
3116  v--;
3117  v |= v >> 1;
3118  v |= v >> 2;
3119  v |= v >> 4;
3120  v |= v >> 8;
3121  v |= v >> 16;
3122  v |= v >> 32;
3123  v++;
3124  return v;
3125 }
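// Example: VmaNextPow2(17) == 32 and VmaNextPow2(16) == 16; the shifts
// propagate the highest set bit of (v - 1) into all lower positions before
// the final increment.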
3126 
3127 // Returns the largest power of 2 less than or equal to v.
3128 static inline uint32_t VmaPrevPow2(uint32_t v)
3129 {
3130  v |= v >> 1;
3131  v |= v >> 2;
3132  v |= v >> 4;
3133  v |= v >> 8;
3134  v |= v >> 16;
3135  v = v ^ (v >> 1);
3136  return v;
3137 }
3138 static inline uint64_t VmaPrevPow2(uint64_t v)
3139 {
3140  v |= v >> 1;
3141  v |= v >> 2;
3142  v |= v >> 4;
3143  v |= v >> 8;
3144  v |= v >> 16;
3145  v |= v >> 32;
3146  v = v ^ (v >> 1);
3147  return v;
3148 }
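// Example: VmaPrevPow2(17) == 16 and VmaPrevPow2(16) == 16; after the shifts,
// v has every bit below its highest set bit filled, and v ^ (v >> 1) keeps
// only that highest bit.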
3149 
3150 static inline bool VmaStrIsEmpty(const char* pStr)
3151 {
3152  return pStr == VMA_NULL || *pStr == '\0';
3153 }
3154 
3155 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3156 {
3157  switch(algorithm)
3158  {
3159  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3160  return "Linear";
3161  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3162  return "Buddy";
3163  case 0:
3164  return "Default";
3165  default:
3166  VMA_ASSERT(0);
3167  return "";
3168  }
3169 }
3170 
3171 #ifndef VMA_SORT
3172 
3173 template<typename Iterator, typename Compare>
3174 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3175 {
3176  Iterator centerValue = end; --centerValue;
3177  Iterator insertIndex = beg;
3178  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3179  {
3180  if(cmp(*memTypeIndex, *centerValue))
3181  {
3182  if(insertIndex != memTypeIndex)
3183  {
3184  VMA_SWAP(*memTypeIndex, *insertIndex);
3185  }
3186  ++insertIndex;
3187  }
3188  }
3189  if(insertIndex != centerValue)
3190  {
3191  VMA_SWAP(*insertIndex, *centerValue);
3192  }
3193  return insertIndex;
3194 }
3195 
3196 template<typename Iterator, typename Compare>
3197 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3198 {
3199  if(beg < end)
3200  {
3201  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3202  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3203  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3204  }
3205 }
3206 
3207 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3208 
3209 #endif // #ifndef VMA_SORT
3210 
3211 /*
3212 Returns true if two memory blocks occupy overlapping pages.
3213 ResourceA must be at a lower memory offset than ResourceB.
3214 
3215 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3216 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3217 */
3218 static inline bool VmaBlocksOnSamePage(
3219  VkDeviceSize resourceAOffset,
3220  VkDeviceSize resourceASize,
3221  VkDeviceSize resourceBOffset,
3222  VkDeviceSize pageSize)
3223 {
3224  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3225  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3226  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3227  VkDeviceSize resourceBStart = resourceBOffset;
3228  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3229  return resourceAEndPage == resourceBStartPage;
3230 }
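// Worked example (illustrative values, not from the spec): with
// pageSize = 4096, VmaBlocksOnSamePage(0, 100, 1000, 4096) returns true
// because resource A ends on page 0 and resource B also starts on page 0,
// while VmaBlocksOnSamePage(0, 100, 4096, 4096) returns false because
// resource B starts on page 1.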
3231 
3232 enum VmaSuballocationType
3233 {
3234  VMA_SUBALLOCATION_TYPE_FREE = 0,
3235  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3236  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3237  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3238  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3239  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3240  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3241 };
3242 
3243 /*
3244 Returns true if given suballocation types could conflict and must respect
3245 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3246 or linear image and the other is an optimal image. If a type is unknown, behave
3247 conservatively.
3248 */
3249 static inline bool VmaIsBufferImageGranularityConflict(
3250  VmaSuballocationType suballocType1,
3251  VmaSuballocationType suballocType2)
3252 {
3253  if(suballocType1 > suballocType2)
3254  {
3255  VMA_SWAP(suballocType1, suballocType2);
3256  }
3257 
3258  switch(suballocType1)
3259  {
3260  case VMA_SUBALLOCATION_TYPE_FREE:
3261  return false;
3262  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3263  return true;
3264  case VMA_SUBALLOCATION_TYPE_BUFFER:
3265  return
3266  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3267  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3268  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3269  return
3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3271  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3272  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3273  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3274  return
3275  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3276  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3277  return false;
3278  default:
3279  VMA_ASSERT(0);
3280  return true;
3281  }
3282 }
3283 
3284 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3285 {
3286  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3287  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3288  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3289  {
3290  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3291  }
3292 }
3293 
3294 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3295 {
3296  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3297  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3298  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3299  {
3300  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3301  {
3302  return false;
3303  }
3304  }
3305  return true;
3306 }
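// Worked example: if VMA_DEBUG_MARGIN is defined to 16, numberCount is
// 16 / sizeof(uint32_t) == 4, so each margin holds four copies of
// VMA_CORRUPTION_DETECTION_MAGIC_VALUE; any stray write into the margin makes
// VmaValidateMagicValue() return false.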
3307 
3308 // Helper RAII class that locks a mutex in its constructor and unlocks it in its destructor (at the end of scope).
3309 struct VmaMutexLock
3310 {
3311  VMA_CLASS_NO_COPY(VmaMutexLock)
3312 public:
3313  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3314  m_pMutex(useMutex ? &mutex : VMA_NULL)
3315  {
3316  if(m_pMutex)
3317  {
3318  m_pMutex->Lock();
3319  }
3320  }
3321 
3322  ~VmaMutexLock()
3323  {
3324  if(m_pMutex)
3325  {
3326  m_pMutex->Unlock();
3327  }
3328  }
3329 
3330 private:
3331  VMA_MUTEX* m_pMutex;
3332 };
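// Usage sketch (hypothetical caller code - myMutex and useMutex are not names
// from this file):
//
//   {
//       VmaMutexLock lock(myMutex, useMutex); // locks only if useMutex is true
//       // ... critical section ...
//   } // automatically unlocked here, even on early return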
3333 
3334 #if VMA_DEBUG_GLOBAL_MUTEX
3335  static VMA_MUTEX gDebugGlobalMutex;
3336  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3337 #else
3338  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3339 #endif
3340 
3341 // Minimum size of a free suballocation to register it in the free suballocation collection.
3342 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3343 
3344 /*
3345 Performs binary search and returns an iterator to the first element that is
3346 greater than or equal to (key), according to comparison (cmp).
3347 
3348 Cmp should return true if its first argument is less than its second argument.
3349 
3350 The returned value is the found element, if it is present in the collection,
3351 or otherwise the place where a new element with value (key) should be inserted.
3352 */
3353 template <typename CmpLess, typename IterT, typename KeyT>
3354 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3355 {
3356  size_t down = 0, up = (end - beg);
3357  while(down < up)
3358  {
3359  const size_t mid = (down + up) / 2;
3360  if(cmp(*(beg+mid), key))
3361  {
3362  down = mid + 1;
3363  }
3364  else
3365  {
3366  up = mid;
3367  }
3368  }
3369  return beg + down;
3370 }
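// Usage sketch with hypothetical values: in a sorted array {1, 3, 3, 7} with a
// less-than comparator, searching for key 3 returns an iterator to the first 3
// (index 1), and searching for key 4 returns an iterator to 7 (index 3) - the
// position where 4 would have to be inserted to keep the array sorted.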
3371 
3373 // Memory allocation
3374 
3375 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3376 {
3377  if((pAllocationCallbacks != VMA_NULL) &&
3378  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3379  {
3380  return (*pAllocationCallbacks->pfnAllocation)(
3381  pAllocationCallbacks->pUserData,
3382  size,
3383  alignment,
3384  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3385  }
3386  else
3387  {
3388  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3389  }
3390 }
3391 
3392 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3393 {
3394  if((pAllocationCallbacks != VMA_NULL) &&
3395  (pAllocationCallbacks->pfnFree != VMA_NULL))
3396  {
3397  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3398  }
3399  else
3400  {
3401  VMA_SYSTEM_FREE(ptr);
3402  }
3403 }
3404 
3405 template<typename T>
3406 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3407 {
3408  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3409 }
3410 
3411 template<typename T>
3412 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3413 {
3414  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3415 }
3416 
3417 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3418 
3419 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3420 
3421 template<typename T>
3422 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3423 {
3424  ptr->~T();
3425  VmaFree(pAllocationCallbacks, ptr);
3426 }
3427 
3428 template<typename T>
3429 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3430 {
3431  if(ptr != VMA_NULL)
3432  {
3433  for(size_t i = count; i--; )
3434  {
3435  ptr[i].~T();
3436  }
3437  VmaFree(pAllocationCallbacks, ptr);
3438  }
3439 }
3440 
3441 // STL-compatible allocator.
3442 template<typename T>
3443 class VmaStlAllocator
3444 {
3445 public:
3446  const VkAllocationCallbacks* const m_pCallbacks;
3447  typedef T value_type;
3448 
3449  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3450  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3451 
3452  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3453  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3454 
3455  template<typename U>
3456  bool operator==(const VmaStlAllocator<U>& rhs) const
3457  {
3458  return m_pCallbacks == rhs.m_pCallbacks;
3459  }
3460  template<typename U>
3461  bool operator!=(const VmaStlAllocator<U>& rhs) const
3462  {
3463  return m_pCallbacks != rhs.m_pCallbacks;
3464  }
3465 
3466  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3467 };
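// Usage sketch (hypothetical; pCallbacks stands for any VkAllocationCallbacks
// pointer): the allocator plugs into STL-style containers and routes their
// allocations through the callbacks, or the system fallback when they are null.
//
//   std::vector<int, VmaStlAllocator<int> > v((VmaStlAllocator<int>(pCallbacks)));
//   v.push_back(42); // allocated via pCallbacks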
3468 
3469 #if VMA_USE_STL_VECTOR
3470 
3471 #define VmaVector std::vector
3472 
3473 template<typename T, typename allocatorT>
3474 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3475 {
3476  vec.insert(vec.begin() + index, item);
3477 }
3478 
3479 template<typename T, typename allocatorT>
3480 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3481 {
3482  vec.erase(vec.begin() + index);
3483 }
3484 
3485 #else // #if VMA_USE_STL_VECTOR
3486 
3487 /* Class with an interface compatible with a subset of std::vector.
3488 T must be POD because constructors and destructors are not called and memcpy is
3489 used for these objects. */
3490 template<typename T, typename AllocatorT>
3491 class VmaVector
3492 {
3493 public:
3494  typedef T value_type;
3495 
3496  VmaVector(const AllocatorT& allocator) :
3497  m_Allocator(allocator),
3498  m_pArray(VMA_NULL),
3499  m_Count(0),
3500  m_Capacity(0)
3501  {
3502  }
3503 
3504  VmaVector(size_t count, const AllocatorT& allocator) :
3505  m_Allocator(allocator),
3506  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3507  m_Count(count),
3508  m_Capacity(count)
3509  {
3510  }
3511 
3512  VmaVector(const VmaVector<T, AllocatorT>& src) :
3513  m_Allocator(src.m_Allocator),
3514  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3515  m_Count(src.m_Count),
3516  m_Capacity(src.m_Count)
3517  {
3518  if(m_Count != 0)
3519  {
3520  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3521  }
3522  }
3523 
3524  ~VmaVector()
3525  {
3526  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3527  }
3528 
3529  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3530  {
3531  if(&rhs != this)
3532  {
3533  resize(rhs.m_Count);
3534  if(m_Count != 0)
3535  {
3536  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3537  }
3538  }
3539  return *this;
3540  }
3541 
3542  bool empty() const { return m_Count == 0; }
3543  size_t size() const { return m_Count; }
3544  T* data() { return m_pArray; }
3545  const T* data() const { return m_pArray; }
3546 
3547  T& operator[](size_t index)
3548  {
3549  VMA_HEAVY_ASSERT(index < m_Count);
3550  return m_pArray[index];
3551  }
3552  const T& operator[](size_t index) const
3553  {
3554  VMA_HEAVY_ASSERT(index < m_Count);
3555  return m_pArray[index];
3556  }
3557 
3558  T& front()
3559  {
3560  VMA_HEAVY_ASSERT(m_Count > 0);
3561  return m_pArray[0];
3562  }
3563  const T& front() const
3564  {
3565  VMA_HEAVY_ASSERT(m_Count > 0);
3566  return m_pArray[0];
3567  }
3568  T& back()
3569  {
3570  VMA_HEAVY_ASSERT(m_Count > 0);
3571  return m_pArray[m_Count - 1];
3572  }
3573  const T& back() const
3574  {
3575  VMA_HEAVY_ASSERT(m_Count > 0);
3576  return m_pArray[m_Count - 1];
3577  }
3578 
3579  void reserve(size_t newCapacity, bool freeMemory = false)
3580  {
3581  newCapacity = VMA_MAX(newCapacity, m_Count);
3582 
3583  if((newCapacity < m_Capacity) && !freeMemory)
3584  {
3585  newCapacity = m_Capacity;
3586  }
3587 
3588  if(newCapacity != m_Capacity)
3589  {
3590  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3591  if(m_Count != 0)
3592  {
3593  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3594  }
3595  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3596  m_Capacity = newCapacity;
3597  m_pArray = newArray;
3598  }
3599  }
3600 
3601  void resize(size_t newCount, bool freeMemory = false)
3602  {
3603  size_t newCapacity = m_Capacity;
3604  if(newCount > m_Capacity)
3605  {
3606  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3607  }
3608  else if(freeMemory)
3609  {
3610  newCapacity = newCount;
3611  }
3612 
3613  if(newCapacity != m_Capacity)
3614  {
3615  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3616  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3617  if(elementsToCopy != 0)
3618  {
3619  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3620  }
3621  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3622  m_Capacity = newCapacity;
3623  m_pArray = newArray;
3624  }
3625 
3626  m_Count = newCount;
3627  }
3628 
3629  void clear(bool freeMemory = false)
3630  {
3631  resize(0, freeMemory);
3632  }
3633 
3634  void insert(size_t index, const T& src)
3635  {
3636  VMA_HEAVY_ASSERT(index <= m_Count);
3637  const size_t oldCount = size();
3638  resize(oldCount + 1);
3639  if(index < oldCount)
3640  {
3641  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3642  }
3643  m_pArray[index] = src;
3644  }
3645 
3646  void remove(size_t index)
3647  {
3648  VMA_HEAVY_ASSERT(index < m_Count);
3649  const size_t oldCount = size();
3650  if(index < oldCount - 1)
3651  {
3652  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3653  }
3654  resize(oldCount - 1);
3655  }
3656 
3657  void push_back(const T& src)
3658  {
3659  const size_t newIndex = size();
3660  resize(newIndex + 1);
3661  m_pArray[newIndex] = src;
3662  }
3663 
3664  void pop_back()
3665  {
3666  VMA_HEAVY_ASSERT(m_Count > 0);
3667  resize(size() - 1);
3668  }
3669 
3670  void push_front(const T& src)
3671  {
3672  insert(0, src);
3673  }
3674 
3675  void pop_front()
3676  {
3677  VMA_HEAVY_ASSERT(m_Count > 0);
3678  remove(0);
3679  }
3680 
3681  typedef T* iterator;
3682 
3683  iterator begin() { return m_pArray; }
3684  iterator end() { return m_pArray + m_Count; }
3685 
3686 private:
3687  AllocatorT m_Allocator;
3688  T* m_pArray;
3689  size_t m_Count;
3690  size_t m_Capacity;
3691 };
3692 
3693 template<typename T, typename allocatorT>
3694 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
3695 {
3696  vec.insert(index, item);
3697 }
3698 
3699 template<typename T, typename allocatorT>
3700 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
3701 {
3702  vec.remove(index);
3703 }
3704 
3705 #endif // #if VMA_USE_STL_VECTOR
3706 
3707 template<typename CmpLess, typename VectorT>
3708 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
3709 {
3710  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3711  vector.data(),
3712  vector.data() + vector.size(),
3713  value,
3714  CmpLess()) - vector.data();
3715  VmaVectorInsert(vector, indexToInsert, value);
3716  return indexToInsert;
3717 }
3718 
3719 template<typename CmpLess, typename VectorT>
3720 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
3721 {
3722  CmpLess comparator;
3723  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
3724  vector.begin(),
3725  vector.end(),
3726  value,
3727  comparator);
3728  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
3729  {
3730  size_t indexToRemove = it - vector.begin();
3731  VmaVectorRemove(vector, indexToRemove);
3732  return true;
3733  }
3734  return false;
3735 }
3736 
3737 template<typename CmpLess, typename IterT, typename KeyT>
3738 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3739 {
3740  CmpLess comparator;
3741  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3742  beg, end, value, comparator);
3743  if(it == end ||
3744  (!comparator(*it, value) && !comparator(value, *it)))
3745  {
3746  return it;
3747  }
3748  return end;
3749 }
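// Usage sketch (hypothetical): these helpers keep a vector sorted under a
// strict-weak-ordering comparator, e.g. VmaSuballocationOffsetLess defined
// later in this file.
//
//   VmaVectorInsertSorted<VmaSuballocationOffsetLess>(vec, suballoc);
//   VmaVectorRemoveSorted<VmaSuballocationOffsetLess>(vec, suballoc);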
3750 
3752 // class VmaPoolAllocator
3753 
3754 /*
3755 Allocator for objects of type T using a list of arrays (pools) to speed up
3756 allocation. The number of elements that can be allocated is not bounded because
3757 the allocator can create multiple blocks.
3758 */
3759 template<typename T>
3760 class VmaPoolAllocator
3761 {
3762  VMA_CLASS_NO_COPY(VmaPoolAllocator)
3763 public:
3764  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
3765  ~VmaPoolAllocator();
3766  void Clear();
3767  T* Alloc();
3768  void Free(T* ptr);
3769 
3770 private:
3771  union Item
3772  {
3773  uint32_t NextFreeIndex;
3774  T Value;
3775  };
3776 
3777  struct ItemBlock
3778  {
3779  Item* pItems;
3780  uint32_t FirstFreeIndex;
3781  };
3782 
3783  const VkAllocationCallbacks* m_pAllocationCallbacks;
3784  size_t m_ItemsPerBlock;
3785  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
3786 
3787  ItemBlock& CreateNewBlock();
3788 };
3789 
3790 template<typename T>
3791 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
3792  m_pAllocationCallbacks(pAllocationCallbacks),
3793  m_ItemsPerBlock(itemsPerBlock),
3794  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
3795 {
3796  VMA_ASSERT(itemsPerBlock > 0);
3797 }
3798 
3799 template<typename T>
3800 VmaPoolAllocator<T>::~VmaPoolAllocator()
3801 {
3802  Clear();
3803 }
3804 
3805 template<typename T>
3806 void VmaPoolAllocator<T>::Clear()
3807 {
3808  for(size_t i = m_ItemBlocks.size(); i--; )
3809  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3810  m_ItemBlocks.clear();
3811 }
3812 
3813 template<typename T>
3814 T* VmaPoolAllocator<T>::Alloc()
3815 {
3816  for(size_t i = m_ItemBlocks.size(); i--; )
3817  {
3818  ItemBlock& block = m_ItemBlocks[i];
3819  // This block has some free items: Use the first one.
3820  if(block.FirstFreeIndex != UINT32_MAX)
3821  {
3822  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3823  block.FirstFreeIndex = pItem->NextFreeIndex;
3824  return &pItem->Value;
3825  }
3826  }
3827 
3828  // No block has a free item: Create a new one and use it.
3829  ItemBlock& newBlock = CreateNewBlock();
3830  Item* const pItem = &newBlock.pItems[0];
3831  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3832  return &pItem->Value;
3833 }
3834 
3835 template<typename T>
3836 void VmaPoolAllocator<T>::Free(T* ptr)
3837 {
3838  // Search all memory blocks to find ptr.
3839  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3840  {
3841  ItemBlock& block = m_ItemBlocks[i];
3842 
3843  // Reinterpret ptr as a pointer to the Item union (memcpy avoids an aliasing cast).
3844  Item* pItemPtr;
3845  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3846 
3847  // Check if pItemPtr is in address range of this block.
3848  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3849  {
3850  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3851  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3852  block.FirstFreeIndex = index;
3853  return;
3854  }
3855  }
3856  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3857 }
3858 
3859 template<typename T>
3860 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3861 {
3862  ItemBlock newBlock = {
3863  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3864 
3865  m_ItemBlocks.push_back(newBlock);
3866 
3867  // Set up a singly-linked list of all free items in this block.
3868  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3869  newBlock.pItems[i].NextFreeIndex = i + 1;
3870  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3871  return m_ItemBlocks.back();
3872 }
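// Usage sketch (hypothetical; pAllocationCallbacks and MyPod are example names).
// Note that Alloc() returns raw storage from a pooled block without running T's
// constructor, and Free() runs no destructor, so T must be trivially
// constructible and destructible, or the caller must manage object lifetime.
//
//   struct MyPod { uint32_t x; };
//   VmaPoolAllocator<MyPod> poolAlloc(pAllocationCallbacks, 128);
//   MyPod* p = poolAlloc.Alloc(); // raw storage, no constructor runs
//   p->x = 42;
//   poolAlloc.Free(p);            // no destructor runs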
3873 
3875 // class VmaRawList, VmaList
3876 
3877 #if VMA_USE_STL_LIST
3878 
3879 #define VmaList std::list
3880 
3881 #else // #if VMA_USE_STL_LIST
3882 
3883 template<typename T>
3884 struct VmaListItem
3885 {
3886  VmaListItem* pPrev;
3887  VmaListItem* pNext;
3888  T Value;
3889 };
3890 
3891 // Doubly linked list.
3892 template<typename T>
3893 class VmaRawList
3894 {
3895  VMA_CLASS_NO_COPY(VmaRawList)
3896 public:
3897  typedef VmaListItem<T> ItemType;
3898 
3899  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
3900  ~VmaRawList();
3901  void Clear();
3902 
3903  size_t GetCount() const { return m_Count; }
3904  bool IsEmpty() const { return m_Count == 0; }
3905 
3906  ItemType* Front() { return m_pFront; }
3907  const ItemType* Front() const { return m_pFront; }
3908  ItemType* Back() { return m_pBack; }
3909  const ItemType* Back() const { return m_pBack; }
3910 
3911  ItemType* PushBack();
3912  ItemType* PushFront();
3913  ItemType* PushBack(const T& value);
3914  ItemType* PushFront(const T& value);
3915  void PopBack();
3916  void PopFront();
3917 
3918  // Item can be null - it means PushBack.
3919  ItemType* InsertBefore(ItemType* pItem);
3920  // Item can be null - it means PushFront.
3921  ItemType* InsertAfter(ItemType* pItem);
3922 
3923  ItemType* InsertBefore(ItemType* pItem, const T& value);
3924  ItemType* InsertAfter(ItemType* pItem, const T& value);
3925 
3926  void Remove(ItemType* pItem);
3927 
3928 private:
3929  const VkAllocationCallbacks* const m_pAllocationCallbacks;
3930  VmaPoolAllocator<ItemType> m_ItemAllocator;
3931  ItemType* m_pFront;
3932  ItemType* m_pBack;
3933  size_t m_Count;
3934 };
3935 
3936 template<typename T>
3937 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
3938  m_pAllocationCallbacks(pAllocationCallbacks),
3939  m_ItemAllocator(pAllocationCallbacks, 128),
3940  m_pFront(VMA_NULL),
3941  m_pBack(VMA_NULL),
3942  m_Count(0)
3943 {
3944 }
3945 
3946 template<typename T>
3947 VmaRawList<T>::~VmaRawList()
3948 {
3949  // Intentionally not calling Clear, because that would waste computation
3950  // returning all items to m_ItemAllocator as free.
3951 }
3952 
3953 template<typename T>
3954 void VmaRawList<T>::Clear()
3955 {
3956  if(IsEmpty() == false)
3957  {
3958  ItemType* pItem = m_pBack;
3959  while(pItem != VMA_NULL)
3960  {
3961  ItemType* const pPrevItem = pItem->pPrev;
3962  m_ItemAllocator.Free(pItem);
3963  pItem = pPrevItem;
3964  }
3965  m_pFront = VMA_NULL;
3966  m_pBack = VMA_NULL;
3967  m_Count = 0;
3968  }
3969 }
3970 
3971 template<typename T>
3972 VmaListItem<T>* VmaRawList<T>::PushBack()
3973 {
3974  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3975  pNewItem->pNext = VMA_NULL;
3976  if(IsEmpty())
3977  {
3978  pNewItem->pPrev = VMA_NULL;
3979  m_pFront = pNewItem;
3980  m_pBack = pNewItem;
3981  m_Count = 1;
3982  }
3983  else
3984  {
3985  pNewItem->pPrev = m_pBack;
3986  m_pBack->pNext = pNewItem;
3987  m_pBack = pNewItem;
3988  ++m_Count;
3989  }
3990  return pNewItem;
3991 }
3992 
3993 template<typename T>
3994 VmaListItem<T>* VmaRawList<T>::PushFront()
3995 {
3996  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3997  pNewItem->pPrev = VMA_NULL;
3998  if(IsEmpty())
3999  {
4000  pNewItem->pNext = VMA_NULL;
4001  m_pFront = pNewItem;
4002  m_pBack = pNewItem;
4003  m_Count = 1;
4004  }
4005  else
4006  {
4007  pNewItem->pNext = m_pFront;
4008  m_pFront->pPrev = pNewItem;
4009  m_pFront = pNewItem;
4010  ++m_Count;
4011  }
4012  return pNewItem;
4013 }
4014 
4015 template<typename T>
4016 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4017 {
4018  ItemType* const pNewItem = PushBack();
4019  pNewItem->Value = value;
4020  return pNewItem;
4021 }
4022 
4023 template<typename T>
4024 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4025 {
4026  ItemType* const pNewItem = PushFront();
4027  pNewItem->Value = value;
4028  return pNewItem;
4029 }
4030 
4031 template<typename T>
4032 void VmaRawList<T>::PopBack()
4033 {
4034  VMA_HEAVY_ASSERT(m_Count > 0);
4035  ItemType* const pBackItem = m_pBack;
4036  ItemType* const pPrevItem = pBackItem->pPrev;
4037  if(pPrevItem != VMA_NULL)
4038  {
4039  pPrevItem->pNext = VMA_NULL;
4040  }
4041  m_pBack = pPrevItem;
4042  m_ItemAllocator.Free(pBackItem);
4043  --m_Count;
4044 }
4045 
4046 template<typename T>
4047 void VmaRawList<T>::PopFront()
4048 {
4049  VMA_HEAVY_ASSERT(m_Count > 0);
4050  ItemType* const pFrontItem = m_pFront;
4051  ItemType* const pNextItem = pFrontItem->pNext;
4052  if(pNextItem != VMA_NULL)
4053  {
4054  pNextItem->pPrev = VMA_NULL;
4055  }
4056  m_pFront = pNextItem;
4057  m_ItemAllocator.Free(pFrontItem);
4058  --m_Count;
4059 }
4060 
4061 template<typename T>
4062 void VmaRawList<T>::Remove(ItemType* pItem)
4063 {
4064  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4065  VMA_HEAVY_ASSERT(m_Count > 0);
4066 
4067  if(pItem->pPrev != VMA_NULL)
4068  {
4069  pItem->pPrev->pNext = pItem->pNext;
4070  }
4071  else
4072  {
4073  VMA_HEAVY_ASSERT(m_pFront == pItem);
4074  m_pFront = pItem->pNext;
4075  }
4076 
4077  if(pItem->pNext != VMA_NULL)
4078  {
4079  pItem->pNext->pPrev = pItem->pPrev;
4080  }
4081  else
4082  {
4083  VMA_HEAVY_ASSERT(m_pBack == pItem);
4084  m_pBack = pItem->pPrev;
4085  }
4086 
4087  m_ItemAllocator.Free(pItem);
4088  --m_Count;
4089 }
4090 
4091 template<typename T>
4092 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4093 {
4094  if(pItem != VMA_NULL)
4095  {
4096  ItemType* const prevItem = pItem->pPrev;
4097  ItemType* const newItem = m_ItemAllocator.Alloc();
4098  newItem->pPrev = prevItem;
4099  newItem->pNext = pItem;
4100  pItem->pPrev = newItem;
4101  if(prevItem != VMA_NULL)
4102  {
4103  prevItem->pNext = newItem;
4104  }
4105  else
4106  {
4107  VMA_HEAVY_ASSERT(m_pFront == pItem);
4108  m_pFront = newItem;
4109  }
4110  ++m_Count;
4111  return newItem;
4112  }
4113  else
4114  return PushBack();
4115 }
4116 
4117 template<typename T>
4118 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4119 {
4120  if(pItem != VMA_NULL)
4121  {
4122  ItemType* const nextItem = pItem->pNext;
4123  ItemType* const newItem = m_ItemAllocator.Alloc();
4124  newItem->pNext = nextItem;
4125  newItem->pPrev = pItem;
4126  pItem->pNext = newItem;
4127  if(nextItem != VMA_NULL)
4128  {
4129  nextItem->pPrev = newItem;
4130  }
4131  else
4132  {
4133  VMA_HEAVY_ASSERT(m_pBack == pItem);
4134  m_pBack = newItem;
4135  }
4136  ++m_Count;
4137  return newItem;
4138  }
4139  else
4140  return PushFront();
4141 }
4142 
4143 template<typename T>
4144 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4145 {
4146  ItemType* const newItem = InsertBefore(pItem);
4147  newItem->Value = value;
4148  return newItem;
4149 }
4150 
4151 template<typename T>
4152 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4153 {
4154  ItemType* const newItem = InsertAfter(pItem);
4155  newItem->Value = value;
4156  return newItem;
4157 }
4158 
4159 template<typename T, typename AllocatorT>
4160 class VmaList
4161 {
4162  VMA_CLASS_NO_COPY(VmaList)
4163 public:
4164  class iterator
4165  {
4166  public:
4167  iterator() :
4168  m_pList(VMA_NULL),
4169  m_pItem(VMA_NULL)
4170  {
4171  }
4172 
4173  T& operator*() const
4174  {
4175  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4176  return m_pItem->Value;
4177  }
4178  T* operator->() const
4179  {
4180  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4181  return &m_pItem->Value;
4182  }
4183 
4184  iterator& operator++()
4185  {
4186  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4187  m_pItem = m_pItem->pNext;
4188  return *this;
4189  }
4190  iterator& operator--()
4191  {
4192  if(m_pItem != VMA_NULL)
4193  {
4194  m_pItem = m_pItem->pPrev;
4195  }
4196  else
4197  {
4198  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4199  m_pItem = m_pList->Back();
4200  }
4201  return *this;
4202  }
4203 
4204  iterator operator++(int)
4205  {
4206  iterator result = *this;
4207  ++*this;
4208  return result;
4209  }
4210  iterator operator--(int)
4211  {
4212  iterator result = *this;
4213  --*this;
4214  return result;
4215  }
4216 
4217  bool operator==(const iterator& rhs) const
4218  {
4219  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4220  return m_pItem == rhs.m_pItem;
4221  }
4222  bool operator!=(const iterator& rhs) const
4223  {
4224  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4225  return m_pItem != rhs.m_pItem;
4226  }
4227 
4228  private:
4229  VmaRawList<T>* m_pList;
4230  VmaListItem<T>* m_pItem;
4231 
4232  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4233  m_pList(pList),
4234  m_pItem(pItem)
4235  {
4236  }
4237 
4238  friend class VmaList<T, AllocatorT>;
4239  };
4240 
4241  class const_iterator
4242  {
4243  public:
4244  const_iterator() :
4245  m_pList(VMA_NULL),
4246  m_pItem(VMA_NULL)
4247  {
4248  }
4249 
4250  const_iterator(const iterator& src) :
4251  m_pList(src.m_pList),
4252  m_pItem(src.m_pItem)
4253  {
4254  }
4255 
4256  const T& operator*() const
4257  {
4258  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4259  return m_pItem->Value;
4260  }
4261  const T* operator->() const
4262  {
4263  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4264  return &m_pItem->Value;
4265  }
4266 
4267  const_iterator& operator++()
4268  {
4269  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4270  m_pItem = m_pItem->pNext;
4271  return *this;
4272  }
4273  const_iterator& operator--()
4274  {
4275  if(m_pItem != VMA_NULL)
4276  {
4277  m_pItem = m_pItem->pPrev;
4278  }
4279  else
4280  {
4281  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4282  m_pItem = m_pList->Back();
4283  }
4284  return *this;
4285  }
4286 
4287  const_iterator operator++(int)
4288  {
4289  const_iterator result = *this;
4290  ++*this;
4291  return result;
4292  }
4293  const_iterator operator--(int)
4294  {
4295  const_iterator result = *this;
4296  --*this;
4297  return result;
4298  }
4299 
4300  bool operator==(const const_iterator& rhs) const
4301  {
4302  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4303  return m_pItem == rhs.m_pItem;
4304  }
4305  bool operator!=(const const_iterator& rhs) const
4306  {
4307  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4308  return m_pItem != rhs.m_pItem;
4309  }
4310 
4311  private:
4312  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4313  m_pList(pList),
4314  m_pItem(pItem)
4315  {
4316  }
4317 
4318  const VmaRawList<T>* m_pList;
4319  const VmaListItem<T>* m_pItem;
4320 
4321  friend class VmaList<T, AllocatorT>;
4322  };
4323 
4324  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4325 
4326  bool empty() const { return m_RawList.IsEmpty(); }
4327  size_t size() const { return m_RawList.GetCount(); }
4328 
4329  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4330  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4331 
4332  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4333  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4334 
4335  void clear() { m_RawList.Clear(); }
4336  void push_back(const T& value) { m_RawList.PushBack(value); }
4337  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4338  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4339 
4340 private:
4341  VmaRawList<T> m_RawList;
4342 };
4343 
4344 #endif // #if VMA_USE_STL_LIST
4345 
4347 // class VmaMap
4348 
4349 // Unused in this version.
4350 #if 0
4351 
4352 #if VMA_USE_STL_UNORDERED_MAP
4353 
4354 #define VmaPair std::pair
4355 
4356 #define VMA_MAP_TYPE(KeyT, ValueT) \
4357  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4358 
4359 #else // #if VMA_USE_STL_UNORDERED_MAP
4360 
4361 template<typename T1, typename T2>
4362 struct VmaPair
4363 {
4364  T1 first;
4365  T2 second;
4366 
4367  VmaPair() : first(), second() { }
4368  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4369 };
4370 
4371 /* Class compatible with subset of interface of std::unordered_map.
4372 KeyT, ValueT must be POD because they will be stored in VmaVector.
4373 */
4374 template<typename KeyT, typename ValueT>
4375 class VmaMap
4376 {
4377 public:
4378  typedef VmaPair<KeyT, ValueT> PairType;
4379  typedef PairType* iterator;
4380 
4381  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4382 
4383  iterator begin() { return m_Vector.begin(); }
4384  iterator end() { return m_Vector.end(); }
4385 
4386  void insert(const PairType& pair);
4387  iterator find(const KeyT& key);
4388  void erase(iterator it);
4389 
4390 private:
4391  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4392 };
4393 
4394 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4395 
4396 template<typename FirstT, typename SecondT>
4397 struct VmaPairFirstLess
4398 {
4399  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4400  {
4401  return lhs.first < rhs.first;
4402  }
4403  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4404  {
4405  return lhs.first < rhsFirst;
4406  }
4407 };
4408 
4409 template<typename KeyT, typename ValueT>
4410 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4411 {
4412  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4413  m_Vector.data(),
4414  m_Vector.data() + m_Vector.size(),
4415  pair,
4416  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4417  VmaVectorInsert(m_Vector, indexToInsert, pair);
4418 }
4419 
4420 template<typename KeyT, typename ValueT>
4421 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4422 {
4423  PairType* it = VmaBinaryFindFirstNotLess(
4424  m_Vector.data(),
4425  m_Vector.data() + m_Vector.size(),
4426  key,
4427  VmaPairFirstLess<KeyT, ValueT>());
4428  if((it != m_Vector.end()) && (it->first == key))
4429  {
4430  return it;
4431  }
4432  else
4433  {
4434  return m_Vector.end();
4435  }
4436 }
4437 
4438 template<typename KeyT, typename ValueT>
4439 void VmaMap<KeyT, ValueT>::erase(iterator it)
4440 {
4441  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4442 }
4443 
4444 #endif // #if VMA_USE_STL_UNORDERED_MAP
4445 
4446 #endif // #if 0
4447 
4449 
4450 class VmaDeviceMemoryBlock;
4451 
4452 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4453 
4454 struct VmaAllocation_T
4455 {
4456  VMA_CLASS_NO_COPY(VmaAllocation_T)
4457 private:
4458  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4459 
4460  enum FLAGS
4461  {
4462  FLAG_USER_DATA_STRING = 0x01,
4463  };
4464 
4465 public:
4466  enum ALLOCATION_TYPE
4467  {
4468  ALLOCATION_TYPE_NONE,
4469  ALLOCATION_TYPE_BLOCK,
4470  ALLOCATION_TYPE_DEDICATED,
4471  };
4472 
4473  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4474  m_Alignment(1),
4475  m_Size(0),
4476  m_pUserData(VMA_NULL),
4477  m_LastUseFrameIndex(currentFrameIndex),
4478  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4479  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4480  m_MapCount(0),
4481  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4482  {
4483 #if VMA_STATS_STRING_ENABLED
4484  m_CreationFrameIndex = currentFrameIndex;
4485  m_BufferImageUsage = 0;
4486 #endif
4487  }
4488 
4489  ~VmaAllocation_T()
4490  {
4491  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4492 
4493  // Check if owned string was freed.
4494  VMA_ASSERT(m_pUserData == VMA_NULL);
4495  }
4496 
4497  void InitBlockAllocation(
4498  VmaPool hPool,
4499  VmaDeviceMemoryBlock* block,
4500  VkDeviceSize offset,
4501  VkDeviceSize alignment,
4502  VkDeviceSize size,
4503  VmaSuballocationType suballocationType,
4504  bool mapped,
4505  bool canBecomeLost)
4506  {
4507  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4508  VMA_ASSERT(block != VMA_NULL);
4509  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4510  m_Alignment = alignment;
4511  m_Size = size;
4512  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4513  m_SuballocationType = (uint8_t)suballocationType;
4514  m_BlockAllocation.m_hPool = hPool;
4515  m_BlockAllocation.m_Block = block;
4516  m_BlockAllocation.m_Offset = offset;
4517  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4518  }
4519 
4520  void InitLost()
4521  {
4522  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4523  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4524  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4525  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4526  m_BlockAllocation.m_Block = VMA_NULL;
4527  m_BlockAllocation.m_Offset = 0;
4528  m_BlockAllocation.m_CanBecomeLost = true;
4529  }
4530 
4531  void ChangeBlockAllocation(
4532  VmaAllocator hAllocator,
4533  VmaDeviceMemoryBlock* block,
4534  VkDeviceSize offset);
4535 
4536  void ChangeSize(VkDeviceSize newSize);
4537 
4538  // A non-null pMappedData means the allocation was created with the MAPPED flag.
4539  void InitDedicatedAllocation(
4540  uint32_t memoryTypeIndex,
4541  VkDeviceMemory hMemory,
4542  VmaSuballocationType suballocationType,
4543  void* pMappedData,
4544  VkDeviceSize size)
4545  {
4546  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4547  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4548  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4549  m_Alignment = 0;
4550  m_Size = size;
4551  m_SuballocationType = (uint8_t)suballocationType;
4552  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4553  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4554  m_DedicatedAllocation.m_hMemory = hMemory;
4555  m_DedicatedAllocation.m_pMappedData = pMappedData;
4556  }
4557 
4558  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4559  VkDeviceSize GetAlignment() const { return m_Alignment; }
4560  VkDeviceSize GetSize() const { return m_Size; }
4561  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4562  void* GetUserData() const { return m_pUserData; }
4563  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4564  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4565 
4566  VmaDeviceMemoryBlock* GetBlock() const
4567  {
4568  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4569  return m_BlockAllocation.m_Block;
4570  }
4571  VkDeviceSize GetOffset() const;
4572  VkDeviceMemory GetMemory() const;
4573  uint32_t GetMemoryTypeIndex() const;
4574  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4575  void* GetMappedData() const;
4576  bool CanBecomeLost() const;
4577  VmaPool GetPool() const;
4578 
4579  uint32_t GetLastUseFrameIndex() const
4580  {
4581  return m_LastUseFrameIndex.load();
4582  }
4583  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4584  {
4585  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4586  }
4587  /*
4588  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4589  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4590  - Else, returns false.
4591 
4592  If hAllocation is already lost, asserts - you should not call this function then.
4593  If hAllocation was not created with CAN_BECOME_LOST_BIT, asserts.
4594  */
4595  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4596 
4597  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4598  {
4599  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4600  outInfo.blockCount = 1;
4601  outInfo.allocationCount = 1;
4602  outInfo.unusedRangeCount = 0;
4603  outInfo.usedBytes = m_Size;
4604  outInfo.unusedBytes = 0;
4605  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4606  outInfo.unusedRangeSizeMin = UINT64_MAX;
4607  outInfo.unusedRangeSizeMax = 0;
4608  }
4609 
4610  void BlockAllocMap();
4611  void BlockAllocUnmap();
4612  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4613  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4614 
4615 #if VMA_STATS_STRING_ENABLED
4616  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4617  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4618 
4619  void InitBufferImageUsage(uint32_t bufferImageUsage)
4620  {
4621  VMA_ASSERT(m_BufferImageUsage == 0);
4622  m_BufferImageUsage = bufferImageUsage;
4623  }
4624 
4625  void PrintParameters(class VmaJsonWriter& json) const;
4626 #endif
4627 
4628 private:
4629  VkDeviceSize m_Alignment;
4630  VkDeviceSize m_Size;
4631  void* m_pUserData;
4632  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4633  uint8_t m_Type; // ALLOCATION_TYPE
4634  uint8_t m_SuballocationType; // VmaSuballocationType
4635  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4636  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
4637  uint8_t m_MapCount;
4638  uint8_t m_Flags; // enum FLAGS
4639 
4640  // Allocation out of VmaDeviceMemoryBlock.
4641  struct BlockAllocation
4642  {
4643  VmaPool m_hPool; // Null if belongs to general memory.
4644  VmaDeviceMemoryBlock* m_Block;
4645  VkDeviceSize m_Offset;
4646  bool m_CanBecomeLost;
4647  };
4648 
4649  // Allocation for an object that has its own private VkDeviceMemory.
4650  struct DedicatedAllocation
4651  {
4652  uint32_t m_MemoryTypeIndex;
4653  VkDeviceMemory m_hMemory;
4654  void* m_pMappedData; // Not null means memory is mapped.
4655  };
4656 
4657  union
4658  {
4659  // Allocation out of VmaDeviceMemoryBlock.
4660  BlockAllocation m_BlockAllocation;
4661  // Allocation for an object that has its own private VkDeviceMemory.
4662  DedicatedAllocation m_DedicatedAllocation;
4663  };
4664 
4665 #if VMA_STATS_STRING_ENABLED
4666  uint32_t m_CreationFrameIndex;
4667  uint32_t m_BufferImageUsage; // 0 if unknown.
4668 #endif
4669 
4670  void FreeUserDataString(VmaAllocator hAllocator);
4671 };
4672 
4673 /*
4674 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
4675 an allocated memory block, or free.
4676 */
4677 struct VmaSuballocation
4678 {
4679  VkDeviceSize offset;
4680  VkDeviceSize size;
4681  VmaAllocation hAllocation;
4682  VmaSuballocationType type;
4683 };
4684 
4685 // Comparators for ordering suballocations by offset.
4686 struct VmaSuballocationOffsetLess
4687 {
4688  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4689  {
4690  return lhs.offset < rhs.offset;
4691  }
4692 };
4693 struct VmaSuballocationOffsetGreater
4694 {
4695  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4696  {
4697  return lhs.offset > rhs.offset;
4698  }
4699 };
4700 
4701 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
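// Illustrative sketch, not part of the library: with the comparators above, a
// container kept sorted by offset supports O(log n) lookup via binary search.
// VmaExample_FindAtOffset is a hypothetical helper written for this example.
#include <algorithm>
#include <vector>
static bool VmaExample_FindAtOffset(
    const std::vector<VmaSuballocation>& sortedAsc, // sorted with VmaSuballocationOffsetLess
    VkDeviceSize offset)
{
    VmaSuballocation key = {};
    key.offset = offset;
    const auto it = std::lower_bound(
        sortedAsc.begin(), sortedAsc.end(), key, VmaSuballocationOffsetLess());
    return it != sortedAsc.end() && it->offset == offset;
}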
4702 
4703 // Cost of making one more allocation lost, expressed in equivalent bytes.
4704 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4705 
4706 /*
4707 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4708 
4709 If canMakeOtherLost was false:
4710 - item points to a FREE suballocation.
4711 - itemsToMakeLostCount is 0.
4712 
4713 If canMakeOtherLost was true:
4714 - item points to first of sequence of suballocations, which are either FREE,
4715  or point to VmaAllocations that can become lost.
4716 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4717  the requested allocation to succeed.
4718 */
4719 struct VmaAllocationRequest
4720 {
4721  VkDeviceSize offset;
4722  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4723  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4724  VmaSuballocationList::iterator item;
4725  size_t itemsToMakeLostCount;
4726  void* customData;
4727 
4728  VkDeviceSize CalcCost() const
4729  {
4730  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4731  }
4732 };
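// Worked example (illustrative): suppose a request overlaps two allocations
// that can become lost, totaling 49152 bytes (sumItemSize). Then
//   CalcCost() = 49152 + 2 * VMA_LOST_ALLOCATION_COST
//              = 49152 + 2 * 1048576 = 2146304 "equivalent bytes",
// so invalidating live allocations is heavily penalized when candidate
// requests are compared by cost.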
4733 
4734 /*
4735 Data structure used for bookkeeping of allocations and unused ranges of memory
4736 in a single VkDeviceMemory block.
4737 */
4738 class VmaBlockMetadata
4739 {
4740 public:
4741  VmaBlockMetadata(VmaAllocator hAllocator);
4742  virtual ~VmaBlockMetadata() { }
4743  virtual void Init(VkDeviceSize size) { m_Size = size; }
4744 
4745  // Validates all data structures inside this object. If not valid, returns false.
4746  virtual bool Validate() const = 0;
4747  VkDeviceSize GetSize() const { return m_Size; }
4748  virtual size_t GetAllocationCount() const = 0;
4749  virtual VkDeviceSize GetSumFreeSize() const = 0;
4750  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
4751  // Returns true if this block is empty - contains only a single free suballocation.
4752  virtual bool IsEmpty() const = 0;
4753 
4754  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
4755  // Shouldn't modify blockCount.
4756  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
4757 
4758 #if VMA_STATS_STRING_ENABLED
4759  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
4760 #endif
4761 
4762  // Tries to find a place for suballocation with given parameters inside this block.
4763  // If succeeded, fills pAllocationRequest and returns true.
4764  // If failed, returns false.
4765  virtual bool CreateAllocationRequest(
4766  uint32_t currentFrameIndex,
4767  uint32_t frameInUseCount,
4768  VkDeviceSize bufferImageGranularity,
4769  VkDeviceSize allocSize,
4770  VkDeviceSize allocAlignment,
4771  bool upperAddress,
4772  VmaSuballocationType allocType,
4773  bool canMakeOtherLost,
4774  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
4775  VmaAllocationRequest* pAllocationRequest) = 0;
4776 
4777  virtual bool MakeRequestedAllocationsLost(
4778  uint32_t currentFrameIndex,
4779  uint32_t frameInUseCount,
4780  VmaAllocationRequest* pAllocationRequest) = 0;
4781 
4782  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
4783 
4784  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
4785 
4786  // Makes actual allocation based on request. Request must already be checked and valid.
4787  virtual void Alloc(
4788  const VmaAllocationRequest& request,
4789  VmaSuballocationType type,
4790  VkDeviceSize allocSize,
4791  bool upperAddress,
4792  VmaAllocation hAllocation) = 0;
4793 
4794  // Frees suballocation assigned to given memory region.
4795  virtual void Free(const VmaAllocation allocation) = 0;
4796  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
4797 
4798  // Tries to resize (grow or shrink) space for given allocation, in place.
4799  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
4800 
4801 protected:
4802  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
4803 
4804 #if VMA_STATS_STRING_ENABLED
4805  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
4806  VkDeviceSize unusedBytes,
4807  size_t allocationCount,
4808  size_t unusedRangeCount) const;
4809  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
4810  VkDeviceSize offset,
4811  VmaAllocation hAllocation) const;
4812  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
4813  VkDeviceSize offset,
4814  VkDeviceSize size) const;
4815  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
4816 #endif
4817 
4818 private:
4819  VkDeviceSize m_Size;
4820  const VkAllocationCallbacks* m_pAllocationCallbacks;
4821 };
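// Illustrative sketch, not part of the library: a block owner drives the
// interface above in two phases - plan, then commit. VmaExample_TryAllocate
// is hypothetical and omits lost-allocation support and error handling.
static bool VmaExample_TryAllocate(
    VmaBlockMetadata& metadata,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaAllocation hAllocation)
{
    VmaAllocationRequest request = {};
    if(!metadata.CreateAllocationRequest(
        0, // currentFrameIndex - irrelevant without lost allocations
        0, // frameInUseCount
        1, // bufferImageGranularity - 1 disables granularity conflicts
        allocSize,
        allocAlignment,
        false, // upperAddress
        allocType,
        false, // canMakeOtherLost
        VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT, // strategy
        &request))
    {
        return false; // No place found - block too small or too fragmented.
    }
    metadata.Alloc(request, allocType, allocSize, false /*upperAddress*/, hAllocation);
    return true;
}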
4822 
4823 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
4824  VMA_ASSERT(0 && "Validation failed: " #cond); \
4825  return false; \
4826  } } while(false)
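// Usage sketch (illustrative): inside a Validate() implementation, VMA_VALIDATE
// turns each failed invariant into an assert plus an early 'return false':
//
//     virtual bool Validate() const
//     {
//         VMA_VALIDATE(m_SumFreeSize <= GetSize());
//         VMA_VALIDATE(m_FreeCount <= m_Suballocations.size());
//         return true;
//     }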
4827 
4828 class VmaBlockMetadata_Generic : public VmaBlockMetadata
4829 {
4830  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
4831 public:
4832  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
4833  virtual ~VmaBlockMetadata_Generic();
4834  virtual void Init(VkDeviceSize size);
4835 
4836  virtual bool Validate() const;
4837  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4838  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4839  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4840  virtual bool IsEmpty() const;
4841 
4842  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4843  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4844 
4845 #if VMA_STATS_STRING_ENABLED
4846  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4847 #endif
4848 
4849  virtual bool CreateAllocationRequest(
4850  uint32_t currentFrameIndex,
4851  uint32_t frameInUseCount,
4852  VkDeviceSize bufferImageGranularity,
4853  VkDeviceSize allocSize,
4854  VkDeviceSize allocAlignment,
4855  bool upperAddress,
4856  VmaSuballocationType allocType,
4857  bool canMakeOtherLost,
4858  uint32_t strategy,
4859  VmaAllocationRequest* pAllocationRequest);
4860 
4861  virtual bool MakeRequestedAllocationsLost(
4862  uint32_t currentFrameIndex,
4863  uint32_t frameInUseCount,
4864  VmaAllocationRequest* pAllocationRequest);
4865 
4866  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4867 
4868  virtual VkResult CheckCorruption(const void* pBlockData);
4869 
4870  virtual void Alloc(
4871  const VmaAllocationRequest& request,
4872  VmaSuballocationType type,
4873  VkDeviceSize allocSize,
4874  bool upperAddress,
4875  VmaAllocation hAllocation);
4876 
4877  virtual void Free(const VmaAllocation allocation);
4878  virtual void FreeAtOffset(VkDeviceSize offset);
4879 
4880  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
4881 
4882 private:
4883  uint32_t m_FreeCount;
4884  VkDeviceSize m_SumFreeSize;
4885  VmaSuballocationList m_Suballocations;
4886  // Suballocations that are free and have size greater than certain threshold.
4887  // Sorted by size, ascending.
4888  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4889 
4890  bool ValidateFreeSuballocationList() const;
4891 
4892  // Checks if requested suballocation with given parameters can be placed in given suballocItem.
4893  // If yes, fills pOffset and returns true. If no, returns false.
4894  bool CheckAllocation(
4895  uint32_t currentFrameIndex,
4896  uint32_t frameInUseCount,
4897  VkDeviceSize bufferImageGranularity,
4898  VkDeviceSize allocSize,
4899  VkDeviceSize allocAlignment,
4900  VmaSuballocationType allocType,
4901  VmaSuballocationList::const_iterator suballocItem,
4902  bool canMakeOtherLost,
4903  VkDeviceSize* pOffset,
4904  size_t* itemsToMakeLostCount,
4905  VkDeviceSize* pSumFreeSize,
4906  VkDeviceSize* pSumItemSize) const;
4907  // Given a free suballocation, merges it with the following one, which must also be free.
4908  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4909  // Releases given suballocation, making it free.
4910  // Merges it with adjacent free suballocations if applicable.
4911  // Returns iterator to new free suballocation at this place.
4912  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4913  // Given a free suballocation, inserts it into the sorted list
4914  // m_FreeSuballocationsBySize if it is suitable (large enough).
4915  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4916  // Given a free suballocation, removes it from the sorted list
4917  // m_FreeSuballocationsBySize if it is present there.
4918  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4919 };
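// Illustrative sketch, not part of the library: because
// m_FreeSuballocationsBySize is sorted by size ascending, a best-fit candidate
// is the first entry at least as large as the request - one binary search.
// This example models the idea with plain sizes instead of list iterators.
#include <algorithm>
#include <cstdint>
#include <vector>
static size_t VmaExample_FindBestFit(
    const std::vector<VkDeviceSize>& freeSizesAsc, VkDeviceSize allocSize)
{
    const auto it = std::lower_bound(freeSizesAsc.begin(), freeSizesAsc.end(), allocSize);
    return it != freeSizesAsc.end() ?
        (size_t)(it - freeSizesAsc.begin()) :
        SIZE_MAX; // No free range is large enough.
}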
4920 
4921 /*
4922 Allocations and their references in internal data structure look like this:
4923 
4924 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4925 
4926  0 +-------+
4927  | |
4928  | |
4929  | |
4930  +-------+
4931  | Alloc | 1st[m_1stNullItemsBeginCount]
4932  +-------+
4933  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4934  +-------+
4935  | ... |
4936  +-------+
4937  | Alloc | 1st[1st.size() - 1]
4938  +-------+
4939  | |
4940  | |
4941  | |
4942 GetSize() +-------+
4943 
4944 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4945 
4946  0 +-------+
4947  | Alloc | 2nd[0]
4948  +-------+
4949  | Alloc | 2nd[1]
4950  +-------+
4951  | ... |
4952  +-------+
4953  | Alloc | 2nd[2nd.size() - 1]
4954  +-------+
4955  | |
4956  | |
4957  | |
4958  +-------+
4959  | Alloc | 1st[m_1stNullItemsBeginCount]
4960  +-------+
4961  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4962  +-------+
4963  | ... |
4964  +-------+
4965  | Alloc | 1st[1st.size() - 1]
4966  +-------+
4967  | |
4968 GetSize() +-------+
4969 
4970 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4971 
4972  0 +-------+
4973  | |
4974  | |
4975  | |
4976  +-------+
4977  | Alloc | 1st[m_1stNullItemsBeginCount]
4978  +-------+
4979  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4980  +-------+
4981  | ... |
4982  +-------+
4983  | Alloc | 1st[1st.size() - 1]
4984  +-------+
4985  | |
4986  | |
4987  | |
4988  +-------+
4989  | Alloc | 2nd[2nd.size() - 1]
4990  +-------+
4991  | ... |
4992  +-------+
4993  | Alloc | 2nd[1]
4994  +-------+
4995  | Alloc | 2nd[0]
4996 GetSize() +-------+
4997 
4998 */
4999 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5000 {
5001  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5002 public:
5003  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5004  virtual ~VmaBlockMetadata_Linear();
5005  virtual void Init(VkDeviceSize size);
5006 
5007  virtual bool Validate() const;
5008  virtual size_t GetAllocationCount() const;
5009  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5010  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5011  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5012 
5013  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5014  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5015 
5016 #if VMA_STATS_STRING_ENABLED
5017  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5018 #endif
5019 
5020  virtual bool CreateAllocationRequest(
5021  uint32_t currentFrameIndex,
5022  uint32_t frameInUseCount,
5023  VkDeviceSize bufferImageGranularity,
5024  VkDeviceSize allocSize,
5025  VkDeviceSize allocAlignment,
5026  bool upperAddress,
5027  VmaSuballocationType allocType,
5028  bool canMakeOtherLost,
5029  uint32_t strategy,
5030  VmaAllocationRequest* pAllocationRequest);
5031 
5032  virtual bool MakeRequestedAllocationsLost(
5033  uint32_t currentFrameIndex,
5034  uint32_t frameInUseCount,
5035  VmaAllocationRequest* pAllocationRequest);
5036 
5037  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5038 
5039  virtual VkResult CheckCorruption(const void* pBlockData);
5040 
5041  virtual void Alloc(
5042  const VmaAllocationRequest& request,
5043  VmaSuballocationType type,
5044  VkDeviceSize allocSize,
5045  bool upperAddress,
5046  VmaAllocation hAllocation);
5047 
5048  virtual void Free(const VmaAllocation allocation);
5049  virtual void FreeAtOffset(VkDeviceSize offset);
5050 
5051 private:
5052  /*
5053  There are two suballocation vectors, used in ping-pong way.
5054  The one with index m_1stVectorIndex is called 1st.
5055  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5056  2nd can be non-empty only when 1st is not empty.
5057  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5058  */
5059  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5060 
5061  enum SECOND_VECTOR_MODE
5062  {
5063  SECOND_VECTOR_EMPTY,
5064  /*
5065  Suballocations in 2nd vector are created later than the ones in 1st, but they
5066  all have smaller offset.
5067  */
5068  SECOND_VECTOR_RING_BUFFER,
5069  /*
5070  Suballocations in 2nd vector are upper side of double stack.
5071  They all have offsets higher than those in 1st vector.
5072  Top of this stack means smaller offsets, but higher indices in this vector.
5073  */
5074  SECOND_VECTOR_DOUBLE_STACK,
5075  };
5076 
5077  VkDeviceSize m_SumFreeSize;
5078  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5079  uint32_t m_1stVectorIndex;
5080  SECOND_VECTOR_MODE m_2ndVectorMode;
5081 
5082  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5083  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5084  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5085  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5086 
5087  // Number of items in 1st vector with hAllocation = null at the beginning.
5088  size_t m_1stNullItemsBeginCount;
5089  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5090  size_t m_1stNullItemsMiddleCount;
5091  // Number of items in 2nd vector with hAllocation = null.
5092  size_t m_2ndNullItemsCount;
5093 
5094  bool ShouldCompact1st() const;
5095  void CleanupAfterFree();
5096 };
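// Illustrative sketch, not part of the library: the ring-buffer mode pictured
// above, modeled with a plain struct. New ranges go to the end of 1st while
// they fit; otherwise they wrap to low offsets and grow 2nd, which stays valid
// only while it remains entirely below the first used offset of 1st.
#include <vector>
struct VmaExample_Range { VkDeviceSize offset, size; };
static bool VmaExample_RingBufferPush(
    std::vector<VmaExample_Range>& first,  // 1st vector, ascending offsets
    std::vector<VmaExample_Range>& second, // 2nd vector, ascending offsets below 1st
    VkDeviceSize blockSize,
    VkDeviceSize size)
{
    const VkDeviceSize endOf1st = first.empty() ? 0 : first.back().offset + first.back().size;
    if(endOf1st + size <= blockSize)
    {
        first.push_back({endOf1st, size}); // Still room at the top: extend 1st.
        return true;
    }
    const VkDeviceSize startOf1st = first.empty() ? blockSize : first.front().offset;
    const VkDeviceSize endOf2nd = second.empty() ? 0 : second.back().offset + second.back().size;
    if(endOf2nd + size <= startOf1st)
    {
        second.push_back({endOf2nd, size}); // Wrapped around: grow 2nd below 1st.
        return true;
    }
    return false; // Block is full for this request size.
}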
5097 
5098 /*
5099 - GetSize() is the original size of allocated memory block.
5100 - m_UsableSize is this size aligned down to a power of two.
5101  All allocations and calculations happen relative to m_UsableSize.
5102 - GetUnusableSize() is the difference between them.
5103  It is reported as a separate, unused range, not available for allocations.
5104 
5105 Node at level 0 has size = m_UsableSize.
5106 Each next level contains nodes with size 2 times smaller than current level.
5107 m_LevelCount is the maximum number of levels to use in the current object.
5108 */
5109 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5110 {
5111  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5112 public:
5113  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5114  virtual ~VmaBlockMetadata_Buddy();
5115  virtual void Init(VkDeviceSize size);
5116 
5117  virtual bool Validate() const;
5118  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5119  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5120  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5121  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5122 
5123  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5124  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5125 
5126 #if VMA_STATS_STRING_ENABLED
5127  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5128 #endif
5129 
5130  virtual bool CreateAllocationRequest(
5131  uint32_t currentFrameIndex,
5132  uint32_t frameInUseCount,
5133  VkDeviceSize bufferImageGranularity,
5134  VkDeviceSize allocSize,
5135  VkDeviceSize allocAlignment,
5136  bool upperAddress,
5137  VmaSuballocationType allocType,
5138  bool canMakeOtherLost,
5139  uint32_t strategy,
5140  VmaAllocationRequest* pAllocationRequest);
5141 
5142  virtual bool MakeRequestedAllocationsLost(
5143  uint32_t currentFrameIndex,
5144  uint32_t frameInUseCount,
5145  VmaAllocationRequest* pAllocationRequest);
5146 
5147  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5148 
5149  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5150 
5151  virtual void Alloc(
5152  const VmaAllocationRequest& request,
5153  VmaSuballocationType type,
5154  VkDeviceSize allocSize,
5155  bool upperAddress,
5156  VmaAllocation hAllocation);
5157 
5158  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5159  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5160 
5161 private:
5162  static const VkDeviceSize MIN_NODE_SIZE = 32;
5163  static const size_t MAX_LEVELS = 30;
5164 
5165  struct ValidationContext
5166  {
5167  size_t calculatedAllocationCount;
5168  size_t calculatedFreeCount;
5169  VkDeviceSize calculatedSumFreeSize;
5170 
5171  ValidationContext() :
5172  calculatedAllocationCount(0),
5173  calculatedFreeCount(0),
5174  calculatedSumFreeSize(0) { }
5175  };
5176 
5177  struct Node
5178  {
5179  VkDeviceSize offset;
5180  enum TYPE
5181  {
5182  TYPE_FREE,
5183  TYPE_ALLOCATION,
5184  TYPE_SPLIT,
5185  TYPE_COUNT
5186  } type;
5187  Node* parent;
5188  Node* buddy;
5189 
5190  union
5191  {
5192  struct
5193  {
5194  Node* prev;
5195  Node* next;
5196  } free;
5197  struct
5198  {
5199  VmaAllocation alloc;
5200  } allocation;
5201  struct
5202  {
5203  Node* leftChild;
5204  } split;
5205  };
5206  };
5207 
5208  // Size of the memory block aligned down to a power of two.
5209  VkDeviceSize m_UsableSize;
5210  uint32_t m_LevelCount;
5211 
5212  Node* m_Root;
5213  struct {
5214  Node* front;
5215  Node* back;
5216  } m_FreeList[MAX_LEVELS];
5217  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5218  size_t m_AllocationCount;
5219  // Number of nodes in the tree with type == TYPE_FREE.
5220  size_t m_FreeCount;
5221  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5222  VkDeviceSize m_SumFreeSize;
5223 
5224  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5225  void DeleteNode(Node* node);
5226  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5227  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5228  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5229  // Alloc passed just for validation. Can be null.
5230  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5231  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5232  // Adds node to the front of FreeList at given level.
5233  // node->type must be FREE.
5234  // node->free.prev, next can be undefined.
5235  void AddToFreeListFront(uint32_t level, Node* node);
5236  // Removes node from FreeList at given level.
5237  // node->type must be FREE.
5238  // node->free.prev, next stay untouched.
5239  void RemoveFromFreeList(uint32_t level, Node* node);
5240 
5241 #if VMA_STATS_STRING_ENABLED
5242  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5243 #endif
5244 };
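// Illustrative sketch, not part of the library: with m_UsableSize at level 0
// and each level halving the node size, the target level for a request is the
// deepest level whose node size still fits the allocation.
static uint32_t VmaExample_SizeToLevel(
    VkDeviceSize usableSize, uint32_t levelCount, VkDeviceSize allocSize)
{
    uint32_t level = 0;
    VkDeviceSize nodeSize = usableSize;
    // Descend while the child node (half this size) would still fit allocSize.
    while(level + 1 < levelCount && nodeSize / 2 >= allocSize)
    {
        nodeSize /= 2;
        ++level;
    }
    return level;
}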
5245 
5246 /*
5247 Represents a single block of device memory (`VkDeviceMemory`) with all the
5248 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5249 
5250 Thread-safety: This class must be externally synchronized.
5251 */
5252 class VmaDeviceMemoryBlock
5253 {
5254  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5255 public:
5256  VmaBlockMetadata* m_pMetadata;
5257 
5258  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5259 
5260  ~VmaDeviceMemoryBlock()
5261  {
5262  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5263  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5264  }
5265 
5266  // Always call after construction.
5267  void Init(
5268  VmaAllocator hAllocator,
5269  uint32_t newMemoryTypeIndex,
5270  VkDeviceMemory newMemory,
5271  VkDeviceSize newSize,
5272  uint32_t id,
5273  uint32_t algorithm);
5274  // Always call before destruction.
5275  void Destroy(VmaAllocator allocator);
5276 
5277  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5278  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5279  uint32_t GetId() const { return m_Id; }
5280  void* GetMappedData() const { return m_pMappedData; }
5281 
5282  // Validates all data structures inside this object. If not valid, returns false.
5283  bool Validate() const;
5284 
5285  VkResult CheckCorruption(VmaAllocator hAllocator);
5286 
5287  // ppData can be null.
5288  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5289  void Unmap(VmaAllocator hAllocator, uint32_t count);
5290 
5291  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5292  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5293 
5294  VkResult BindBufferMemory(
5295  const VmaAllocator hAllocator,
5296  const VmaAllocation hAllocation,
5297  VkBuffer hBuffer);
5298  VkResult BindImageMemory(
5299  const VmaAllocator hAllocator,
5300  const VmaAllocation hAllocation,
5301  VkImage hImage);
5302 
5303 private:
5304  uint32_t m_MemoryTypeIndex;
5305  uint32_t m_Id;
5306  VkDeviceMemory m_hMemory;
5307 
5308  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5309  // Also protects m_MapCount, m_pMappedData.
5310  VMA_MUTEX m_Mutex;
5311  uint32_t m_MapCount;
5312  void* m_pMappedData;
5313 };
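// Illustrative sketch, not part of the library: Map()/Unmap() above reference-
// count the real mapping, so vkMapMemory runs only on the 0 -> 1 transition and
// vkUnmapMemory only on the 1 -> 0 transition. A minimal model of that logic:
struct VmaExample_MappingRefCount
{
    uint32_t count = 0;
    // Returns true when the caller must perform the actual vkMapMemory.
    bool AddRef(uint32_t n) { const bool wasZero = (count == 0); count += n; return wasZero; }
    // Returns true when the caller must perform the actual vkUnmapMemory.
    bool Release(uint32_t n) { count -= n; return count == 0; }
};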
5314 
5315 struct VmaPointerLess
5316 {
5317  bool operator()(const void* lhs, const void* rhs) const
5318  {
5319  return lhs < rhs;
5320  }
5321 };
5322 
5323 class VmaDefragmentator;
5324 
5325 /*
5326 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5327 Vulkan memory type.
5328 
5329 Synchronized internally with a mutex.
5330 */
5331 struct VmaBlockVector
5332 {
5333  VMA_CLASS_NO_COPY(VmaBlockVector)
5334 public:
5335  VmaBlockVector(
5336  VmaAllocator hAllocator,
5337  uint32_t memoryTypeIndex,
5338  VkDeviceSize preferredBlockSize,
5339  size_t minBlockCount,
5340  size_t maxBlockCount,
5341  VkDeviceSize bufferImageGranularity,
5342  uint32_t frameInUseCount,
5343  bool isCustomPool,
5344  bool explicitBlockSize,
5345  uint32_t algorithm);
5346  ~VmaBlockVector();
5347 
5348  VkResult CreateMinBlocks();
5349 
5350  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5351  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5352  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5353  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5354  uint32_t GetAlgorithm() const { return m_Algorithm; }
5355 
5356  void GetPoolStats(VmaPoolStats* pStats);
5357 
5358  bool IsEmpty() const { return m_Blocks.empty(); }
5359  bool IsCorruptionDetectionEnabled() const;
5360 
5361  VkResult Allocate(
5362  VmaPool hCurrentPool,
5363  uint32_t currentFrameIndex,
5364  VkDeviceSize size,
5365  VkDeviceSize alignment,
5366  const VmaAllocationCreateInfo& createInfo,
5367  VmaSuballocationType suballocType,
5368  VmaAllocation* pAllocation);
5369 
5370  void Free(
5371  VmaAllocation hAllocation);
5372 
5373  // Adds statistics of this BlockVector to pStats.
5374  void AddStats(VmaStats* pStats);
5375 
5376 #if VMA_STATS_STRING_ENABLED
5377  void PrintDetailedMap(class VmaJsonWriter& json);
5378 #endif
5379 
5380  void MakePoolAllocationsLost(
5381  uint32_t currentFrameIndex,
5382  size_t* pLostAllocationCount);
5383  VkResult CheckCorruption();
5384 
5385  VmaDefragmentator* EnsureDefragmentator(
5386  VmaAllocator hAllocator,
5387  uint32_t currentFrameIndex);
5388 
5389  VkResult Defragment(
5390  VmaDefragmentationStats* pDefragmentationStats,
5391  VkDeviceSize& maxBytesToMove,
5392  uint32_t& maxAllocationsToMove);
5393 
5394  void DestroyDefragmentator();
5395 
5396 private:
5397  friend class VmaDefragmentator;
5398 
5399  const VmaAllocator m_hAllocator;
5400  const uint32_t m_MemoryTypeIndex;
5401  const VkDeviceSize m_PreferredBlockSize;
5402  const size_t m_MinBlockCount;
5403  const size_t m_MaxBlockCount;
5404  const VkDeviceSize m_BufferImageGranularity;
5405  const uint32_t m_FrameInUseCount;
5406  const bool m_IsCustomPool;
5407  const bool m_ExplicitBlockSize;
5408  const uint32_t m_Algorithm;
5409  bool m_HasEmptyBlock;
5410  VMA_MUTEX m_Mutex;
5411  // Incrementally sorted by sumFreeSize, ascending.
5412  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5413  /* There can be at most one block that is completely empty - a
5414  hysteresis to avoid the pessimistic case of alternating creation and
5415  destruction of a VkDeviceMemory. */
5416  VmaDefragmentator* m_pDefragmentator;
5417  uint32_t m_NextBlockId;
5418 
5419  VkDeviceSize CalcMaxBlockSize() const;
5420 
5421  // Finds and removes given block from vector.
5422  void Remove(VmaDeviceMemoryBlock* pBlock);
5423 
5424  // Performs single step in sorting m_Blocks. They may not be fully sorted
5425  // after this call.
5426  void IncrementallySortBlocks();
5427 
5428  // To be used only without CAN_MAKE_OTHER_LOST flag.
5429  VkResult AllocateFromBlock(
5430  VmaDeviceMemoryBlock* pBlock,
5431  VmaPool hCurrentPool,
5432  uint32_t currentFrameIndex,
5433  VkDeviceSize size,
5434  VkDeviceSize alignment,
5435  VmaAllocationCreateFlags allocFlags,
5436  void* pUserData,
5437  VmaSuballocationType suballocType,
5438  uint32_t strategy,
5439  VmaAllocation* pAllocation);
5440 
5441  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5442 };
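// Illustrative sketch, not part of the library: one plausible model of
// IncrementallySortBlocks() - a single swap per call keeps m_Blocks only
// approximately sorted by sumFreeSize, amortizing sorting across allocations.
#include <utility>
#include <vector>
static void VmaExample_SortStep(std::vector<VmaDeviceMemoryBlock*>& blocks)
{
    for(size_t i = 1; i < blocks.size(); ++i)
    {
        if(blocks[i - 1]->m_pMetadata->GetSumFreeSize() >
            blocks[i]->m_pMetadata->GetSumFreeSize())
        {
            std::swap(blocks[i - 1], blocks[i]);
            return; // At most one swap per call - the "single step".
        }
    }
}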
5443 
5444 struct VmaPool_T
5445 {
5446  VMA_CLASS_NO_COPY(VmaPool_T)
5447 public:
5448  VmaBlockVector m_BlockVector;
5449 
5450  VmaPool_T(
5451  VmaAllocator hAllocator,
5452  const VmaPoolCreateInfo& createInfo,
5453  VkDeviceSize preferredBlockSize);
5454  ~VmaPool_T();
5455 
5456  uint32_t GetId() const { return m_Id; }
5457  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5458 
5459 #if VMA_STATS_STRING_ENABLED
5460  //void PrintDetailedMap(class VmaStringBuilder& sb);
5461 #endif
5462 
5463 private:
5464  uint32_t m_Id;
5465 };
5466 
5467 class VmaDefragmentator
5468 {
5469  VMA_CLASS_NO_COPY(VmaDefragmentator)
5470 private:
5471  const VmaAllocator m_hAllocator;
5472  VmaBlockVector* const m_pBlockVector;
5473  uint32_t m_CurrentFrameIndex;
5474  VkDeviceSize m_BytesMoved;
5475  uint32_t m_AllocationsMoved;
5476 
5477  struct AllocationInfo
5478  {
5479  VmaAllocation m_hAllocation;
5480  VkBool32* m_pChanged;
5481 
5482  AllocationInfo() :
5483  m_hAllocation(VK_NULL_HANDLE),
5484  m_pChanged(VMA_NULL)
5485  {
5486  }
5487  };
5488 
5489  struct AllocationInfoSizeGreater
5490  {
5491  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5492  {
5493  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5494  }
5495  };
5496 
5497  // Used between AddAllocation and Defragment.
5498  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5499 
5500  struct BlockInfo
5501  {
5502  VmaDeviceMemoryBlock* m_pBlock;
5503  bool m_HasNonMovableAllocations;
5504  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5505 
5506  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5507  m_pBlock(VMA_NULL),
5508  m_HasNonMovableAllocations(true),
5509  m_Allocations(pAllocationCallbacks),
5510  m_pMappedDataForDefragmentation(VMA_NULL)
5511  {
5512  }
5513 
5514  void CalcHasNonMovableAllocations()
5515  {
5516  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5517  const size_t defragmentAllocCount = m_Allocations.size();
5518  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5519  }
5520 
5521  void SortAllocationsBySizeDescecnding()
5522  {
5523  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5524  }
5525 
5526  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
5527  void Unmap(VmaAllocator hAllocator);
5528 
5529  private:
5530  // Not null if mapped for defragmentation only, not originally mapped.
5531  void* m_pMappedDataForDefragmentation;
5532  };
5533 
5534  struct BlockPointerLess
5535  {
5536  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5537  {
5538  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5539  }
5540  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5541  {
5542  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5543  }
5544  };
5545 
5546  // 1. Blocks with some non-movable allocations go first.
5547  // 2. Blocks with smaller sumFreeSize go first.
5548  struct BlockInfoCompareMoveDestination
5549  {
5550  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5551  {
5552  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5553  {
5554  return true;
5555  }
5556  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
5557  {
5558  return false;
5559  }
5560  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
5561  {
5562  return true;
5563  }
5564  return false;
5565  }
5566  };
5567 
5568  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
5569  BlockInfoVector m_Blocks;
5570 
5571  VkResult DefragmentRound(
5572  VkDeviceSize maxBytesToMove,
5573  uint32_t maxAllocationsToMove);
5574 
5575  static bool MoveMakesSense(
5576  size_t dstBlockIndex, VkDeviceSize dstOffset,
5577  size_t srcBlockIndex, VkDeviceSize srcOffset);
5578 
5579 public:
5580  VmaDefragmentator(
5581  VmaAllocator hAllocator,
5582  VmaBlockVector* pBlockVector,
5583  uint32_t currentFrameIndex);
5584 
5585  ~VmaDefragmentator();
5586 
5587  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5588  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5589 
5590  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5591 
5592  VkResult Defragment(
5593  VkDeviceSize maxBytesToMove,
5594  uint32_t maxAllocationsToMove);
5595 };
5596 
5597 #if VMA_RECORDING_ENABLED
5598 
5599 class VmaRecorder
5600 {
5601 public:
5602  VmaRecorder();
5603  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
5604  void WriteConfiguration(
5605  const VkPhysicalDeviceProperties& devProps,
5606  const VkPhysicalDeviceMemoryProperties& memProps,
5607  bool dedicatedAllocationExtensionEnabled);
5608  ~VmaRecorder();
5609 
5610  void RecordCreateAllocator(uint32_t frameIndex);
5611  void RecordDestroyAllocator(uint32_t frameIndex);
5612  void RecordCreatePool(uint32_t frameIndex,
5613  const VmaPoolCreateInfo& createInfo,
5614  VmaPool pool);
5615  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
5616  void RecordAllocateMemory(uint32_t frameIndex,
5617  const VkMemoryRequirements& vkMemReq,
5618  const VmaAllocationCreateInfo& createInfo,
5619  VmaAllocation allocation);
5620  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
5621  const VkMemoryRequirements& vkMemReq,
5622  bool requiresDedicatedAllocation,
5623  bool prefersDedicatedAllocation,
5624  const VmaAllocationCreateInfo& createInfo,
5625  VmaAllocation allocation);
5626  void RecordAllocateMemoryForImage(uint32_t frameIndex,
5627  const VkMemoryRequirements& vkMemReq,
5628  bool requiresDedicatedAllocation,
5629  bool prefersDedicatedAllocation,
5630  const VmaAllocationCreateInfo& createInfo,
5631  VmaAllocation allocation);
5632  void RecordFreeMemory(uint32_t frameIndex,
5633  VmaAllocation allocation);
5634  void RecordResizeAllocation(
5635  uint32_t frameIndex,
5636  VmaAllocation allocation,
5637  VkDeviceSize newSize);
5638  void RecordSetAllocationUserData(uint32_t frameIndex,
5639  VmaAllocation allocation,
5640  const void* pUserData);
5641  void RecordCreateLostAllocation(uint32_t frameIndex,
5642  VmaAllocation allocation);
5643  void RecordMapMemory(uint32_t frameIndex,
5644  VmaAllocation allocation);
5645  void RecordUnmapMemory(uint32_t frameIndex,
5646  VmaAllocation allocation);
5647  void RecordFlushAllocation(uint32_t frameIndex,
5648  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5649  void RecordInvalidateAllocation(uint32_t frameIndex,
5650  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5651  void RecordCreateBuffer(uint32_t frameIndex,
5652  const VkBufferCreateInfo& bufCreateInfo,
5653  const VmaAllocationCreateInfo& allocCreateInfo,
5654  VmaAllocation allocation);
5655  void RecordCreateImage(uint32_t frameIndex,
5656  const VkImageCreateInfo& imageCreateInfo,
5657  const VmaAllocationCreateInfo& allocCreateInfo,
5658  VmaAllocation allocation);
5659  void RecordDestroyBuffer(uint32_t frameIndex,
5660  VmaAllocation allocation);
5661  void RecordDestroyImage(uint32_t frameIndex,
5662  VmaAllocation allocation);
5663  void RecordTouchAllocation(uint32_t frameIndex,
5664  VmaAllocation allocation);
5665  void RecordGetAllocationInfo(uint32_t frameIndex,
5666  VmaAllocation allocation);
5667  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
5668  VmaPool pool);
5669 
5670 private:
5671  struct CallParams
5672  {
5673  uint32_t threadId;
5674  double time;
5675  };
5676 
5677  class UserDataString
5678  {
5679  public:
5680  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
5681  const char* GetString() const { return m_Str; }
5682 
5683  private:
5684  char m_PtrStr[17];
5685  const char* m_Str;
5686  };
5687 
5688  bool m_UseMutex;
5689  VmaRecordFlags m_Flags;
5690  FILE* m_File;
5691  VMA_MUTEX m_FileMutex;
5692  int64_t m_Freq;
5693  int64_t m_StartCounter;
5694 
5695  void GetBasicParams(CallParams& outParams);
5696  void Flush();
5697 };
5698 
5699 #endif // #if VMA_RECORDING_ENABLED
5700 
5701 // Main allocator object.
5702 struct VmaAllocator_T
5703 {
5704  VMA_CLASS_NO_COPY(VmaAllocator_T)
5705 public:
5706  bool m_UseMutex;
5707  bool m_UseKhrDedicatedAllocation;
5708  VkDevice m_hDevice;
5709  bool m_AllocationCallbacksSpecified;
5710  VkAllocationCallbacks m_AllocationCallbacks;
5711  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5712 
5713  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit is set for that heap.
5714  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5715  VMA_MUTEX m_HeapSizeLimitMutex;
5716 
5717  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5718  VkPhysicalDeviceMemoryProperties m_MemProps;
5719 
5720  // Default pools.
5721  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5722 
5723  // Each vector is sorted by memory (handle value).
5724  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5725  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5726  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5727 
5728  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5729  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5730  ~VmaAllocator_T();
5731 
5732  const VkAllocationCallbacks* GetAllocationCallbacks() const
5733  {
5734  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5735  }
5736  const VmaVulkanFunctions& GetVulkanFunctions() const
5737  {
5738  return m_VulkanFunctions;
5739  }
5740 
5741  VkDeviceSize GetBufferImageGranularity() const
5742  {
5743  return VMA_MAX(
5744  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5745  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5746  }
5747 
5748  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5749  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5750 
5751  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5752  {
5753  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5754  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5755  }
5756  // True when the specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5757  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5758  {
5759  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5760  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5761  }
5762  // Minimum alignment for all allocations in a specific memory type. See the range-alignment sketch after this struct.
5763  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5764  {
5765  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5766  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5767  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5768  }
5769 
5770  bool IsIntegratedGpu() const
5771  {
5772  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5773  }
5774 
5775 #if VMA_RECORDING_ENABLED
5776  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5777 #endif
5778 
5779  void GetBufferMemoryRequirements(
5780  VkBuffer hBuffer,
5781  VkMemoryRequirements& memReq,
5782  bool& requiresDedicatedAllocation,
5783  bool& prefersDedicatedAllocation) const;
5784  void GetImageMemoryRequirements(
5785  VkImage hImage,
5786  VkMemoryRequirements& memReq,
5787  bool& requiresDedicatedAllocation,
5788  bool& prefersDedicatedAllocation) const;
5789 
5790  // Main allocation function.
5791  VkResult AllocateMemory(
5792  const VkMemoryRequirements& vkMemReq,
5793  bool requiresDedicatedAllocation,
5794  bool prefersDedicatedAllocation,
5795  VkBuffer dedicatedBuffer,
5796  VkImage dedicatedImage,
5797  const VmaAllocationCreateInfo& createInfo,
5798  VmaSuballocationType suballocType,
5799  VmaAllocation* pAllocation);
5800 
5801  // Main deallocation function.
5802  void FreeMemory(const VmaAllocation allocation);
5803 
5804  VkResult ResizeAllocation(
5805  const VmaAllocation alloc,
5806  VkDeviceSize newSize);
5807 
5808  void CalculateStats(VmaStats* pStats);
5809 
5810 #if VMA_STATS_STRING_ENABLED
5811  void PrintDetailedMap(class VmaJsonWriter& json);
5812 #endif
5813 
5814  VkResult Defragment(
5815  VmaAllocation* pAllocations,
5816  size_t allocationCount,
5817  VkBool32* pAllocationsChanged,
5818  const VmaDefragmentationInfo* pDefragmentationInfo,
5819  VmaDefragmentationStats* pDefragmentationStats);
5820 
5821  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5822  bool TouchAllocation(VmaAllocation hAllocation);
5823 
5824  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5825  void DestroyPool(VmaPool pool);
5826  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5827 
5828  void SetCurrentFrameIndex(uint32_t frameIndex);
5829  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5830 
5831  void MakePoolAllocationsLost(
5832  VmaPool hPool,
5833  size_t* pLostAllocationCount);
5834  VkResult CheckPoolCorruption(VmaPool hPool);
5835  VkResult CheckCorruption(uint32_t memoryTypeBits);
5836 
5837  void CreateLostAllocation(VmaAllocation* pAllocation);
5838 
5839  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5840  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5841 
5842  VkResult Map(VmaAllocation hAllocation, void** ppData);
5843  void Unmap(VmaAllocation hAllocation);
5844 
5845  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5846  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5847 
5848  void FlushOrInvalidateAllocation(
5849  VmaAllocation hAllocation,
5850  VkDeviceSize offset, VkDeviceSize size,
5851  VMA_CACHE_OPERATION op);
5852 
5853  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5854 
5855 private:
5856  VkDeviceSize m_PreferredLargeHeapBlockSize;
5857 
5858  VkPhysicalDevice m_PhysicalDevice;
5859  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5860 
5861  VMA_MUTEX m_PoolsMutex;
5862  // Protected by m_PoolsMutex. Sorted by pointer value.
5863  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5864  uint32_t m_NextPoolId;
5865 
5866  VmaVulkanFunctions m_VulkanFunctions;
5867 
5868 #if VMA_RECORDING_ENABLED
5869  VmaRecorder* m_pRecorder;
5870 #endif
5871 
5872  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5873 
5874  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5875 
5876  VkResult AllocateMemoryOfType(
5877  VkDeviceSize size,
5878  VkDeviceSize alignment,
5879  bool dedicatedAllocation,
5880  VkBuffer dedicatedBuffer,
5881  VkImage dedicatedImage,
5882  const VmaAllocationCreateInfo& createInfo,
5883  uint32_t memTypeIndex,
5884  VmaSuballocationType suballocType,
5885  VmaAllocation* pAllocation);
5886 
5887  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
5888  VkResult AllocateDedicatedMemory(
5889  VkDeviceSize size,
5890  VmaSuballocationType suballocType,
5891  uint32_t memTypeIndex,
5892  bool map,
5893  bool isUserDataString,
5894  void* pUserData,
5895  VkBuffer dedicatedBuffer,
5896  VkImage dedicatedImage,
5897  VmaAllocation* pAllocation);
5898 
5899  // Frees an allocation created as dedicated: unregisters it and releases its VkDeviceMemory.
5900  void FreeDedicatedMemory(VmaAllocation allocation);
5901 };
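// Illustrative sketch, not part of the library: on a memory type that is
// HOST_VISIBLE but not HOST_COHERENT, flush/invalidate ranges must be aligned
// to VkPhysicalDeviceLimits::nonCoherentAtomSize, which is why
// GetMemoryTypeMinAlignment() above raises the minimum alignment for such
// types. VmaExample_AlignFlushRange is a hypothetical helper.
static void VmaExample_AlignFlushRange(
    VkDeviceSize offset, VkDeviceSize size, VkDeviceSize atomSize,
    VkDeviceSize& outOffset, VkDeviceSize& outSize)
{
    // Round the start down and the end up to multiples of atomSize.
    outOffset = offset / atomSize * atomSize;
    const VkDeviceSize alignedEnd = (offset + size + atomSize - 1) / atomSize * atomSize;
    outSize = alignedEnd - outOffset;
}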
5902 
5904 // Memory allocation #2 after VmaAllocator_T definition
5905 
5906 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5907 {
5908  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5909 }
5910 
5911 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5912 {
5913  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5914 }
5915 
5916 template<typename T>
5917 static T* VmaAllocate(VmaAllocator hAllocator)
5918 {
5919  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5920 }
5921 
5922 template<typename T>
5923 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5924 {
5925  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5926 }
5927 
5928 template<typename T>
5929 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5930 {
5931  if(ptr != VMA_NULL)
5932  {
5933  ptr->~T();
5934  VmaFree(hAllocator, ptr);
5935  }
5936 }
5937 
5938 template<typename T>
5939 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5940 {
5941  if(ptr != VMA_NULL)
5942  {
5943  for(size_t i = count; i--; )
5944  ptr[i].~T();
5945  VmaFree(hAllocator, ptr);
5946  }
5947 }
5948 
5950 // VmaStringBuilder
5951 
5952 #if VMA_STATS_STRING_ENABLED
5953 
5954 class VmaStringBuilder
5955 {
5956 public:
5957  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
5958  size_t GetLength() const { return m_Data.size(); }
5959  const char* GetData() const { return m_Data.data(); }
5960 
5961  void Add(char ch) { m_Data.push_back(ch); }
5962  void Add(const char* pStr);
5963  void AddNewLine() { Add('\n'); }
5964  void AddNumber(uint32_t num);
5965  void AddNumber(uint64_t num);
5966  void AddPointer(const void* ptr);
5967 
5968 private:
5969  VmaVector< char, VmaStlAllocator<char> > m_Data;
5970 };
5971 
5972 void VmaStringBuilder::Add(const char* pStr)
5973 {
5974  const size_t strLen = strlen(pStr);
5975  if(strLen > 0)
5976  {
5977  const size_t oldCount = m_Data.size();
5978  m_Data.resize(oldCount + strLen);
5979  memcpy(m_Data.data() + oldCount, pStr, strLen);
5980  }
5981 }
5982 
5983 void VmaStringBuilder::AddNumber(uint32_t num)
5984 {
5985  char buf[11]; // Max 10 decimal digits of uint32_t + terminating null.
5986  VmaUint32ToStr(buf, sizeof(buf), num);
5987  Add(buf);
5988 }
5989 
5990 void VmaStringBuilder::AddNumber(uint64_t num)
5991 {
5992  char buf[21]; // Max 20 decimal digits of uint64_t + terminating null.
5993  VmaUint64ToStr(buf, sizeof(buf), num);
5994  Add(buf);
5995 }
5996 
5997 void VmaStringBuilder::AddPointer(const void* ptr)
5998 {
5999  char buf[21];
6000  VmaPtrToStr(buf, sizeof(buf), ptr);
6001  Add(buf);
6002 }
6003 
6004 #endif // #if VMA_STATS_STRING_ENABLED
6005 
6007 // VmaJsonWriter
6008 
6009 #if VMA_STATS_STRING_ENABLED
6010 
6011 class VmaJsonWriter
6012 {
6013  VMA_CLASS_NO_COPY(VmaJsonWriter)
6014 public:
6015  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6016  ~VmaJsonWriter();
6017 
6018  void BeginObject(bool singleLine = false);
6019  void EndObject();
6020 
6021  void BeginArray(bool singleLine = false);
6022  void EndArray();
6023 
6024  void WriteString(const char* pStr);
6025  void BeginString(const char* pStr = VMA_NULL);
6026  void ContinueString(const char* pStr);
6027  void ContinueString(uint32_t n);
6028  void ContinueString(uint64_t n);
6029  void ContinueString_Pointer(const void* ptr);
6030  void EndString(const char* pStr = VMA_NULL);
6031 
6032  void WriteNumber(uint32_t n);
6033  void WriteNumber(uint64_t n);
6034  void WriteBool(bool b);
6035  void WriteNull();
6036 
6037 private:
6038  static const char* const INDENT;
6039 
6040  enum COLLECTION_TYPE
6041  {
6042  COLLECTION_TYPE_OBJECT,
6043  COLLECTION_TYPE_ARRAY,
6044  };
6045  struct StackItem
6046  {
6047  COLLECTION_TYPE type;
6048  uint32_t valueCount;
6049  bool singleLineMode;
6050  };
6051 
6052  VmaStringBuilder& m_SB;
6053  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6054  bool m_InsideString;
6055 
6056  void BeginValue(bool isString);
6057  void WriteIndent(bool oneLess = false);
6058 };
6059 
6060 const char* const VmaJsonWriter::INDENT = " ";
6061 
6062 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6063  m_SB(sb),
6064  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6065  m_InsideString(false)
6066 {
6067 }
6068 
6069 VmaJsonWriter::~VmaJsonWriter()
6070 {
6071  VMA_ASSERT(!m_InsideString);
6072  VMA_ASSERT(m_Stack.empty());
6073 }
6074 
6075 void VmaJsonWriter::BeginObject(bool singleLine)
6076 {
6077  VMA_ASSERT(!m_InsideString);
6078 
6079  BeginValue(false);
6080  m_SB.Add('{');
6081 
6082  StackItem item;
6083  item.type = COLLECTION_TYPE_OBJECT;
6084  item.valueCount = 0;
6085  item.singleLineMode = singleLine;
6086  m_Stack.push_back(item);
6087 }
6088 
6089 void VmaJsonWriter::EndObject()
6090 {
6091  VMA_ASSERT(!m_InsideString);
6092 
6093  WriteIndent(true);
6094  m_SB.Add('}');
6095 
6096  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6097  m_Stack.pop_back();
6098 }
6099 
6100 void VmaJsonWriter::BeginArray(bool singleLine)
6101 {
6102  VMA_ASSERT(!m_InsideString);
6103 
6104  BeginValue(false);
6105  m_SB.Add('[');
6106 
6107  StackItem item;
6108  item.type = COLLECTION_TYPE_ARRAY;
6109  item.valueCount = 0;
6110  item.singleLineMode = singleLine;
6111  m_Stack.push_back(item);
6112 }
6113 
6114 void VmaJsonWriter::EndArray()
6115 {
6116  VMA_ASSERT(!m_InsideString);
6117 
6118  WriteIndent(true);
6119  m_SB.Add(']');
6120 
6121  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6122  m_Stack.pop_back();
6123 }
6124 
6125 void VmaJsonWriter::WriteString(const char* pStr)
6126 {
6127  BeginString(pStr);
6128  EndString();
6129 }
6130 
6131 void VmaJsonWriter::BeginString(const char* pStr)
6132 {
6133  VMA_ASSERT(!m_InsideString);
6134 
6135  BeginValue(true);
6136  m_SB.Add('"');
6137  m_InsideString = true;
6138  if(pStr != VMA_NULL && pStr[0] != '\0')
6139  {
6140  ContinueString(pStr);
6141  }
6142 }
6143 
6144 void VmaJsonWriter::ContinueString(const char* pStr)
6145 {
6146  VMA_ASSERT(m_InsideString);
6147 
6148  const size_t strLen = strlen(pStr);
6149  for(size_t i = 0; i < strLen; ++i)
6150  {
6151  char ch = pStr[i];
6152  if(ch == '\\')
6153  {
6154  m_SB.Add("\\\\");
6155  }
6156  else if(ch == '"')
6157  {
6158  m_SB.Add("\\\"");
6159  }
6160  else if(ch >= 32)
6161  {
6162  m_SB.Add(ch);
6163  }
6164  else switch(ch)
6165  {
6166  case '\b':
6167  m_SB.Add("\\b");
6168  break;
6169  case '\f':
6170  m_SB.Add("\\f");
6171  break;
6172  case '\n':
6173  m_SB.Add("\\n");
6174  break;
6175  case '\r':
6176  m_SB.Add("\\r");
6177  break;
6178  case '\t':
6179  m_SB.Add("\\t");
6180  break;
6181  default:
6182  VMA_ASSERT(0 && "Character not currently supported.");
6183  break;
6184  }
6185  }
6186 }
6187 
6188 void VmaJsonWriter::ContinueString(uint32_t n)
6189 {
6190  VMA_ASSERT(m_InsideString);
6191  m_SB.AddNumber(n);
6192 }
6193 
6194 void VmaJsonWriter::ContinueString(uint64_t n)
6195 {
6196  VMA_ASSERT(m_InsideString);
6197  m_SB.AddNumber(n);
6198 }
6199 
6200 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6201 {
6202  VMA_ASSERT(m_InsideString);
6203  m_SB.AddPointer(ptr);
6204 }
6205 
6206 void VmaJsonWriter::EndString(const char* pStr)
6207 {
6208  VMA_ASSERT(m_InsideString);
6209  if(pStr != VMA_NULL && pStr[0] != '\0')
6210  {
6211  ContinueString(pStr);
6212  }
6213  m_SB.Add('"');
6214  m_InsideString = false;
6215 }
6216 
6217 void VmaJsonWriter::WriteNumber(uint32_t n)
6218 {
6219  VMA_ASSERT(!m_InsideString);
6220  BeginValue(false);
6221  m_SB.AddNumber(n);
6222 }
6223 
6224 void VmaJsonWriter::WriteNumber(uint64_t n)
6225 {
6226  VMA_ASSERT(!m_InsideString);
6227  BeginValue(false);
6228  m_SB.AddNumber(n);
6229 }
6230 
6231 void VmaJsonWriter::WriteBool(bool b)
6232 {
6233  VMA_ASSERT(!m_InsideString);
6234  BeginValue(false);
6235  m_SB.Add(b ? "true" : "false");
6236 }
6237 
6238 void VmaJsonWriter::WriteNull()
6239 {
6240  VMA_ASSERT(!m_InsideString);
6241  BeginValue(false);
6242  m_SB.Add("null");
6243 }
6244 
6245 void VmaJsonWriter::BeginValue(bool isString)
6246 {
6247  if(!m_Stack.empty())
6248  {
6249  StackItem& currItem = m_Stack.back();
6250  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6251  currItem.valueCount % 2 == 0)
6252  {
6253  VMA_ASSERT(isString);
6254  }
6255 
6256  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6257  currItem.valueCount % 2 != 0)
6258  {
6259  m_SB.Add(": ");
6260  }
6261  else if(currItem.valueCount > 0)
6262  {
6263  m_SB.Add(", ");
6264  WriteIndent();
6265  }
6266  else
6267  {
6268  WriteIndent();
6269  }
6270  ++currItem.valueCount;
6271  }
6272 }
6273 
6274 void VmaJsonWriter::WriteIndent(bool oneLess)
6275 {
6276  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6277  {
6278  m_SB.AddNewLine();
6279 
6280  size_t count = m_Stack.size();
6281  if(count > 0 && oneLess)
6282  {
6283  --count;
6284  }
6285  for(size_t i = 0; i < count; ++i)
6286  {
6287  m_SB.Add(INDENT);
6288  }
6289  }
6290 }
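// Usage sketch (illustrative): inside an object, string keys and values must
// alternate - exactly what the valueCount % 2 check in BeginValue() enforces.
//
//     VmaStringBuilder sb(hAllocator);
//     VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//     json.BeginObject();
//     json.WriteString("Size"); // key - must be a string
//     json.WriteNumber(256u);   // value
//     json.EndObject();         // sb now holds { "Size": 256 } plus indentation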
6291 
6292 #endif // #if VMA_STATS_STRING_ENABLED
6293 
6295 
6296 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6297 {
6298  if(IsUserDataString())
6299  {
6300  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6301 
6302  FreeUserDataString(hAllocator);
6303 
6304  if(pUserData != VMA_NULL)
6305  {
6306  const char* const newStrSrc = (char*)pUserData;
6307  const size_t newStrLen = strlen(newStrSrc);
6308  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6309  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6310  m_pUserData = newStrDst;
6311  }
6312  }
6313  else
6314  {
6315  m_pUserData = pUserData;
6316  }
6317 }
6318 
6319 void VmaAllocation_T::ChangeBlockAllocation(
6320  VmaAllocator hAllocator,
6321  VmaDeviceMemoryBlock* block,
6322  VkDeviceSize offset)
6323 {
6324  VMA_ASSERT(block != VMA_NULL);
6325  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6326 
6327  // Move mapping reference counter from old block to new block.
6328  if(block != m_BlockAllocation.m_Block)
6329  {
6330  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6331  if(IsPersistentMap())
6332  ++mapRefCount;
6333  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6334  block->Map(hAllocator, mapRefCount, VMA_NULL);
6335  }
6336 
6337  m_BlockAllocation.m_Block = block;
6338  m_BlockAllocation.m_Offset = offset;
6339 }
6340 
6341 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
6342 {
6343  VMA_ASSERT(newSize > 0);
6344  m_Size = newSize;
6345 }
6346 
6347 VkDeviceSize VmaAllocation_T::GetOffset() const
6348 {
6349  switch(m_Type)
6350  {
6351  case ALLOCATION_TYPE_BLOCK:
6352  return m_BlockAllocation.m_Offset;
6353  case ALLOCATION_TYPE_DEDICATED:
6354  return 0;
6355  default:
6356  VMA_ASSERT(0);
6357  return 0;
6358  }
6359 }
6360 
6361 VkDeviceMemory VmaAllocation_T::GetMemory() const
6362 {
6363  switch(m_Type)
6364  {
6365  case ALLOCATION_TYPE_BLOCK:
6366  return m_BlockAllocation.m_Block->GetDeviceMemory();
6367  case ALLOCATION_TYPE_DEDICATED:
6368  return m_DedicatedAllocation.m_hMemory;
6369  default:
6370  VMA_ASSERT(0);
6371  return VK_NULL_HANDLE;
6372  }
6373 }
6374 
6375 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6376 {
6377  switch(m_Type)
6378  {
6379  case ALLOCATION_TYPE_BLOCK:
6380  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6381  case ALLOCATION_TYPE_DEDICATED:
6382  return m_DedicatedAllocation.m_MemoryTypeIndex;
6383  default:
6384  VMA_ASSERT(0);
6385  return UINT32_MAX;
6386  }
6387 }
6388 
6389 void* VmaAllocation_T::GetMappedData() const
6390 {
6391  switch(m_Type)
6392  {
6393  case ALLOCATION_TYPE_BLOCK:
6394  if(m_MapCount != 0)
6395  {
6396  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6397  VMA_ASSERT(pBlockData != VMA_NULL);
6398  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6399  }
6400  else
6401  {
6402  return VMA_NULL;
6403  }
6404  break;
6405  case ALLOCATION_TYPE_DEDICATED:
6406  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6407  return m_DedicatedAllocation.m_pMappedData;
6408  default:
6409  VMA_ASSERT(0);
6410  return VMA_NULL;
6411  }
6412 }
6413 
6414 bool VmaAllocation_T::CanBecomeLost() const
6415 {
6416  switch(m_Type)
6417  {
6418  case ALLOCATION_TYPE_BLOCK:
6419  return m_BlockAllocation.m_CanBecomeLost;
6420  case ALLOCATION_TYPE_DEDICATED:
6421  return false;
6422  default:
6423  VMA_ASSERT(0);
6424  return false;
6425  }
6426 }
6427 
6428 VmaPool VmaAllocation_T::GetPool() const
6429 {
6430  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6431  return m_BlockAllocation.m_hPool;
6432 }
6433 
6434 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6435 {
6436  VMA_ASSERT(CanBecomeLost());
6437 
6438  /*
6439  Warning: This is a carefully designed algorithm.
6440  Do not modify unless you really know what you're doing :)
6441  */
6442  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
6443  for(;;)
6444  {
6445  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6446  {
6447  VMA_ASSERT(0);
6448  return false;
6449  }
6450  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6451  {
6452  return false;
6453  }
6454  else // Last use time earlier than current time.
6455  {
6456  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6457  {
6458  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6459  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
6460  return true;
6461  }
6462  }
6463  }
6464 }
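
// A minimal standalone sketch of the compare-and-swap retry loop used above
// (illustration only, not part of vk_mem_alloc.h; names are hypothetical and
// the last-use frame index is assumed to live in a plain std::atomic<uint32_t>):
#include <atomic>

static const uint32_t FRAME_INDEX_LOST_SKETCH = UINT32_MAX;

static bool MakeLostSketch(std::atomic<uint32_t>& lastUseFrameIndex,
    uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t localLastUse = lastUseFrameIndex.load();
    for(;;)
    {
        if(localLastUse == FRAME_INDEX_LOST_SKETCH)
        {
            return false; // Already lost.
        }
        else if(localLastUse + frameInUseCount >= currentFrameIndex)
        {
            return false; // Still potentially used by in-flight frames.
        }
        // On failure compare_exchange_weak reloads localLastUse, so the
        // checks above are repeated against the fresh value.
        else if(lastUseFrameIndex.compare_exchange_weak(localLastUse, FRAME_INDEX_LOST_SKETCH))
        {
            return true; // This thread won the race and marked it LOST.
        }
    }
}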
6465 
6466 #if VMA_STATS_STRING_ENABLED
6467 
6468 // Names correspond to values of enum VmaSuballocationType.
6469 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6470  "FREE",
6471  "UNKNOWN",
6472  "BUFFER",
6473  "IMAGE_UNKNOWN",
6474  "IMAGE_LINEAR",
6475  "IMAGE_OPTIMAL",
6476 };
6477 
6478 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6479 {
6480  json.WriteString("Type");
6481  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6482 
6483  json.WriteString("Size");
6484  json.WriteNumber(m_Size);
6485 
6486  if(m_pUserData != VMA_NULL)
6487  {
6488  json.WriteString("UserData");
6489  if(IsUserDataString())
6490  {
6491  json.WriteString((const char*)m_pUserData);
6492  }
6493  else
6494  {
6495  json.BeginString();
6496  json.ContinueString_Pointer(m_pUserData);
6497  json.EndString();
6498  }
6499  }
6500 
6501  json.WriteString("CreationFrameIndex");
6502  json.WriteNumber(m_CreationFrameIndex);
6503 
6504  json.WriteString("LastUseFrameIndex");
6505  json.WriteNumber(GetLastUseFrameIndex());
6506 
6507  if(m_BufferImageUsage != 0)
6508  {
6509  json.WriteString("Usage");
6510  json.WriteNumber(m_BufferImageUsage);
6511  }
6512 }
6513 
6514 #endif
6515 
6516 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6517 {
6518  VMA_ASSERT(IsUserDataString());
6519  if(m_pUserData != VMA_NULL)
6520  {
6521  char* const oldStr = (char*)m_pUserData;
6522  const size_t oldStrLen = strlen(oldStr);
6523  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6524  m_pUserData = VMA_NULL;
6525  }
6526 }
6527 
6528 void VmaAllocation_T::BlockAllocMap()
6529 {
6530  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6531 
6532  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6533  {
6534  ++m_MapCount;
6535  }
6536  else
6537  {
6538  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6539  }
6540 }
6541 
6542 void VmaAllocation_T::BlockAllocUnmap()
6543 {
6544  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6545 
6546  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6547  {
6548  --m_MapCount;
6549  }
6550  else
6551  {
6552  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6553  }
6554 }
6555 
6556 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6557 {
6558  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6559 
6560  if(m_MapCount != 0)
6561  {
6562  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6563  {
6564  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6565  *ppData = m_DedicatedAllocation.m_pMappedData;
6566  ++m_MapCount;
6567  return VK_SUCCESS;
6568  }
6569  else
6570  {
6571  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6572  return VK_ERROR_MEMORY_MAP_FAILED;
6573  }
6574  }
6575  else
6576  {
6577  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6578  hAllocator->m_hDevice,
6579  m_DedicatedAllocation.m_hMemory,
6580  0, // offset
6581  VK_WHOLE_SIZE,
6582  0, // flags
6583  ppData);
6584  if(result == VK_SUCCESS)
6585  {
6586  m_DedicatedAllocation.m_pMappedData = *ppData;
6587  m_MapCount = 1;
6588  }
6589  return result;
6590  }
6591 }
6592 
6593 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6594 {
6595  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6596 
6597  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6598  {
6599  --m_MapCount;
6600  if(m_MapCount == 0)
6601  {
6602  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6603  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6604  hAllocator->m_hDevice,
6605  m_DedicatedAllocation.m_hMemory);
6606  }
6607  }
6608  else
6609  {
6610  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6611  }
6612 }
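
// The four functions above implement reference-counted mapping: vkMapMemory()
// and vkUnmapMemory() are issued only on the 0 -> 1 and 1 -> 0 transitions of
// m_MapCount. A usage sketch through the public API (error handling elided for
// brevity; allocator and alloc are assumed to be created elsewhere):
static void MapRefCountUsageSketch(VmaAllocator allocator, VmaAllocation alloc)
{
    void* pData1 = VMA_NULL;
    void* pData2 = VMA_NULL;
    vmaMapMemory(allocator, alloc, &pData1); // Count 0 -> 1: real vkMapMemory.
    vmaMapMemory(allocator, alloc, &pData2); // Count 1 -> 2: returns the cached pointer.
    VMA_ASSERT(pData1 == pData2);            // Same base pointer while mapped.
    vmaUnmapMemory(allocator, alloc);        // Count 2 -> 1: no Vulkan call.
    vmaUnmapMemory(allocator, alloc);        // Count 1 -> 0: real vkUnmapMemory.
}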
6613 
6614 #if VMA_STATS_STRING_ENABLED
6615 
6616 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6617 {
6618  json.BeginObject();
6619 
6620  json.WriteString("Blocks");
6621  json.WriteNumber(stat.blockCount);
6622 
6623  json.WriteString("Allocations");
6624  json.WriteNumber(stat.allocationCount);
6625 
6626  json.WriteString("UnusedRanges");
6627  json.WriteNumber(stat.unusedRangeCount);
6628 
6629  json.WriteString("UsedBytes");
6630  json.WriteNumber(stat.usedBytes);
6631 
6632  json.WriteString("UnusedBytes");
6633  json.WriteNumber(stat.unusedBytes);
6634 
6635  if(stat.allocationCount > 1)
6636  {
6637  json.WriteString("AllocationSize");
6638  json.BeginObject(true);
6639  json.WriteString("Min");
6640  json.WriteNumber(stat.allocationSizeMin);
6641  json.WriteString("Avg");
6642  json.WriteNumber(stat.allocationSizeAvg);
6643  json.WriteString("Max");
6644  json.WriteNumber(stat.allocationSizeMax);
6645  json.EndObject();
6646  }
6647 
6648  if(stat.unusedRangeCount > 1)
6649  {
6650  json.WriteString("UnusedRangeSize");
6651  json.BeginObject(true);
6652  json.WriteString("Min");
6653  json.WriteNumber(stat.unusedRangeSizeMin);
6654  json.WriteString("Avg");
6655  json.WriteNumber(stat.unusedRangeSizeAvg);
6656  json.WriteString("Max");
6657  json.WriteNumber(stat.unusedRangeSizeMax);
6658  json.EndObject();
6659  }
6660 
6661  json.EndObject();
6662 }
6663 
6664 #endif // #if VMA_STATS_STRING_ENABLED
6665 
6666 struct VmaSuballocationItemSizeLess
6667 {
6668  bool operator()(
6669  const VmaSuballocationList::iterator lhs,
6670  const VmaSuballocationList::iterator rhs) const
6671  {
6672  return lhs->size < rhs->size;
6673  }
6674  bool operator()(
6675  const VmaSuballocationList::iterator lhs,
6676  VkDeviceSize rhsSize) const
6677  {
6678  return lhs->size < rhsSize;
6679  }
6680 };
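
// The second operator() overload exists so a binary search can compare the
// stored list iterators directly against a raw VkDeviceSize key, without
// building a dummy suballocation. An equivalent sketch with std::lower_bound
// (helper name hypothetical; the header itself uses VmaBinaryFindFirstNotLess):
#include <algorithm>

static VmaSuballocationList::iterator* FindFirstNotLessSketch(
    VmaSuballocationList::iterator* pFirst, // e.g. m_FreeSuballocationsBySize.data()
    VmaSuballocationList::iterator* pLast,  // one past the last element
    VkDeviceSize minSize)
{
    // std::lower_bound invokes comp(*it, minSize), which resolves to the
    // (iterator, VkDeviceSize) overload defined above.
    return std::lower_bound(pFirst, pLast, minSize, VmaSuballocationItemSizeLess());
}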
6681 
6682 
6683 ////////////////////////////////////////////////////////////////////////////////
6684 // class VmaBlockMetadata
6685 
6686 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
6687  m_Size(0),
6688  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
6689 {
6690 }
6691 
6692 #if VMA_STATS_STRING_ENABLED
6693 
6694 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6695  VkDeviceSize unusedBytes,
6696  size_t allocationCount,
6697  size_t unusedRangeCount) const
6698 {
6699  json.BeginObject();
6700 
6701  json.WriteString("TotalBytes");
6702  json.WriteNumber(GetSize());
6703 
6704  json.WriteString("UnusedBytes");
6705  json.WriteNumber(unusedBytes);
6706 
6707  json.WriteString("Allocations");
6708  json.WriteNumber((uint64_t)allocationCount);
6709 
6710  json.WriteString("UnusedRanges");
6711  json.WriteNumber((uint64_t)unusedRangeCount);
6712 
6713  json.WriteString("Suballocations");
6714  json.BeginArray();
6715 }
6716 
6717 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6718  VkDeviceSize offset,
6719  VmaAllocation hAllocation) const
6720 {
6721  json.BeginObject(true);
6722 
6723  json.WriteString("Offset");
6724  json.WriteNumber(offset);
6725 
6726  hAllocation->PrintParameters(json);
6727 
6728  json.EndObject();
6729 }
6730 
6731 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6732  VkDeviceSize offset,
6733  VkDeviceSize size) const
6734 {
6735  json.BeginObject(true);
6736 
6737  json.WriteString("Offset");
6738  json.WriteNumber(offset);
6739 
6740  json.WriteString("Type");
6741  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6742 
6743  json.WriteString("Size");
6744  json.WriteNumber(size);
6745 
6746  json.EndObject();
6747 }
6748 
6749 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6750 {
6751  json.EndArray();
6752  json.EndObject();
6753 }
6754 
6755 #endif // #if VMA_STATS_STRING_ENABLED
6756 
6757 ////////////////////////////////////////////////////////////////////////////////
6758 // class VmaBlockMetadata_Generic
6759 
6760 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6761  VmaBlockMetadata(hAllocator),
6762  m_FreeCount(0),
6763  m_SumFreeSize(0),
6764  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6765  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6766 {
6767 }
6768 
6769 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6770 {
6771 }
6772 
6773 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6774 {
6775  VmaBlockMetadata::Init(size);
6776 
6777  m_FreeCount = 1;
6778  m_SumFreeSize = size;
6779 
6780  VmaSuballocation suballoc = {};
6781  suballoc.offset = 0;
6782  suballoc.size = size;
6783  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6784  suballoc.hAllocation = VK_NULL_HANDLE;
6785 
6786  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6787  m_Suballocations.push_back(suballoc);
6788  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6789  --suballocItem;
6790  m_FreeSuballocationsBySize.push_back(suballocItem);
6791 }
6792 
6793 bool VmaBlockMetadata_Generic::Validate() const
6794 {
6795  VMA_VALIDATE(!m_Suballocations.empty());
6796 
6797  // Expected offset of new suballocation as calculated from previous ones.
6798  VkDeviceSize calculatedOffset = 0;
6799  // Expected number of free suballocations as calculated from traversing their list.
6800  uint32_t calculatedFreeCount = 0;
6801  // Expected sum size of free suballocations as calculated from traversing their list.
6802  VkDeviceSize calculatedSumFreeSize = 0;
6803  // Expected number of free suballocations that should be registered in
6804  // m_FreeSuballocationsBySize calculated from traversing their list.
6805  size_t freeSuballocationsToRegister = 0;
6806  // True if previous visited suballocation was free.
6807  bool prevFree = false;
6808 
6809  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6810  suballocItem != m_Suballocations.cend();
6811  ++suballocItem)
6812  {
6813  const VmaSuballocation& subAlloc = *suballocItem;
6814 
6815  // Actual offset of this suballocation doesn't match expected one.
6816  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6817 
6818  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6819  // Two adjacent free suballocations are invalid. They should be merged.
6820  VMA_VALIDATE(!prevFree || !currFree);
6821 
6822  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
6823 
6824  if(currFree)
6825  {
6826  calculatedSumFreeSize += subAlloc.size;
6827  ++calculatedFreeCount;
6828  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6829  {
6830  ++freeSuballocationsToRegister;
6831  }
6832 
6833  // Margin required between allocations - every free range must be at least that large.
6834  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
6835  }
6836  else
6837  {
6838  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
6839  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
6840 
6841  // Margin required between allocations - previous allocation must be free.
6842  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
6843  }
6844 
6845  calculatedOffset += subAlloc.size;
6846  prevFree = currFree;
6847  }
6848 
6849  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6850  // match expected one.
6851  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6852 
6853  VkDeviceSize lastSize = 0;
6854  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6855  {
6856  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6857 
6858  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6859  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6860  // They must be sorted by size ascending.
6861  VMA_VALIDATE(suballocItem->size >= lastSize);
6862 
6863  lastSize = suballocItem->size;
6864  }
6865 
6866  // Check if totals match calculated values.
6867  VMA_VALIDATE(ValidateFreeSuballocationList());
6868  VMA_VALIDATE(calculatedOffset == GetSize());
6869  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6870  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6871 
6872  return true;
6873 }
6874 
6875 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6876 {
6877  if(!m_FreeSuballocationsBySize.empty())
6878  {
6879  return m_FreeSuballocationsBySize.back()->size;
6880  }
6881  else
6882  {
6883  return 0;
6884  }
6885 }
6886 
6887 bool VmaBlockMetadata_Generic::IsEmpty() const
6888 {
6889  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6890 }
6891 
6892 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6893 {
6894  outInfo.blockCount = 1;
6895 
6896  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6897  outInfo.allocationCount = rangeCount - m_FreeCount;
6898  outInfo.unusedRangeCount = m_FreeCount;
6899 
6900  outInfo.unusedBytes = m_SumFreeSize;
6901  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6902 
6903  outInfo.allocationSizeMin = UINT64_MAX;
6904  outInfo.allocationSizeMax = 0;
6905  outInfo.unusedRangeSizeMin = UINT64_MAX;
6906  outInfo.unusedRangeSizeMax = 0;
6907 
6908  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6909  suballocItem != m_Suballocations.cend();
6910  ++suballocItem)
6911  {
6912  const VmaSuballocation& suballoc = *suballocItem;
6913  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6914  {
6915  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6916  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6917  }
6918  else
6919  {
6920  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6921  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6922  }
6923  }
6924 }
6925 
6926 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6927 {
6928  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6929 
6930  inoutStats.size += GetSize();
6931  inoutStats.unusedSize += m_SumFreeSize;
6932  inoutStats.allocationCount += rangeCount - m_FreeCount;
6933  inoutStats.unusedRangeCount += m_FreeCount;
6934  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6935 }
6936 
6937 #if VMA_STATS_STRING_ENABLED
6938 
6939 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6940 {
6941  PrintDetailedMap_Begin(json,
6942  m_SumFreeSize, // unusedBytes
6943  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6944  m_FreeCount); // unusedRangeCount
6945 
6946  size_t i = 0;
6947  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6948  suballocItem != m_Suballocations.cend();
6949  ++suballocItem, ++i)
6950  {
6951  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6952  {
6953  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6954  }
6955  else
6956  {
6957  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6958  }
6959  }
6960 
6961  PrintDetailedMap_End(json);
6962 }
6963 
6964 #endif // #if VMA_STATS_STRING_ENABLED
6965 
6966 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6967  uint32_t currentFrameIndex,
6968  uint32_t frameInUseCount,
6969  VkDeviceSize bufferImageGranularity,
6970  VkDeviceSize allocSize,
6971  VkDeviceSize allocAlignment,
6972  bool upperAddress,
6973  VmaSuballocationType allocType,
6974  bool canMakeOtherLost,
6975  uint32_t strategy,
6976  VmaAllocationRequest* pAllocationRequest)
6977 {
6978  VMA_ASSERT(allocSize > 0);
6979  VMA_ASSERT(!upperAddress);
6980  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6981  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6982  VMA_HEAVY_ASSERT(Validate());
6983 
6984  // There is not enough total free space in this block to fulfill the request: Early return.
6985  if(canMakeOtherLost == false &&
6986  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6987  {
6988  return false;
6989  }
6990 
6991  // New algorithm, efficiently searching freeSuballocationsBySize.
6992  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6993  if(freeSuballocCount > 0)
6994  {
6995  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
6996  {
6997  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
6998  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6999  m_FreeSuballocationsBySize.data(),
7000  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7001  allocSize + 2 * VMA_DEBUG_MARGIN,
7002  VmaSuballocationItemSizeLess());
7003  size_t index = it - m_FreeSuballocationsBySize.data();
7004  for(; index < freeSuballocCount; ++index)
7005  {
7006  if(CheckAllocation(
7007  currentFrameIndex,
7008  frameInUseCount,
7009  bufferImageGranularity,
7010  allocSize,
7011  allocAlignment,
7012  allocType,
7013  m_FreeSuballocationsBySize[index],
7014  false, // canMakeOtherLost
7015  &pAllocationRequest->offset,
7016  &pAllocationRequest->itemsToMakeLostCount,
7017  &pAllocationRequest->sumFreeSize,
7018  &pAllocationRequest->sumItemSize))
7019  {
7020  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7021  return true;
7022  }
7023  }
7024  }
7025  else // WORST_FIT, FIRST_FIT
7026  {
7027  // Search starting from biggest suballocations.
7028  for(size_t index = freeSuballocCount; index--; )
7029  {
7030  if(CheckAllocation(
7031  currentFrameIndex,
7032  frameInUseCount,
7033  bufferImageGranularity,
7034  allocSize,
7035  allocAlignment,
7036  allocType,
7037  m_FreeSuballocationsBySize[index],
7038  false, // canMakeOtherLost
7039  &pAllocationRequest->offset,
7040  &pAllocationRequest->itemsToMakeLostCount,
7041  &pAllocationRequest->sumFreeSize,
7042  &pAllocationRequest->sumItemSize))
7043  {
7044  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7045  return true;
7046  }
7047  }
7048  }
7049  }
7050 
7051  if(canMakeOtherLost)
7052  {
7053  // Brute-force algorithm. TODO: Come up with something better.
7054 
7055  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7056  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7057 
7058  VmaAllocationRequest tmpAllocRequest = {};
7059  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7060  suballocIt != m_Suballocations.end();
7061  ++suballocIt)
7062  {
7063  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7064  suballocIt->hAllocation->CanBecomeLost())
7065  {
7066  if(CheckAllocation(
7067  currentFrameIndex,
7068  frameInUseCount,
7069  bufferImageGranularity,
7070  allocSize,
7071  allocAlignment,
7072  allocType,
7073  suballocIt,
7074  canMakeOtherLost,
7075  &tmpAllocRequest.offset,
7076  &tmpAllocRequest.itemsToMakeLostCount,
7077  &tmpAllocRequest.sumFreeSize,
7078  &tmpAllocRequest.sumItemSize))
7079  {
7080  tmpAllocRequest.item = suballocIt;
7081 
7082  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7083  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7084  {
7085  *pAllocationRequest = tmpAllocRequest;
7086  }
7087  }
7088  }
7089  }
7090 
7091  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7092  {
7093  return true;
7094  }
7095  }
7096 
7097  return false;
7098 }
7099 
7100 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7101  uint32_t currentFrameIndex,
7102  uint32_t frameInUseCount,
7103  VmaAllocationRequest* pAllocationRequest)
7104 {
7105  while(pAllocationRequest->itemsToMakeLostCount > 0)
7106  {
7107  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7108  {
7109  ++pAllocationRequest->item;
7110  }
7111  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7112  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7113  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7114  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7115  {
7116  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7117  --pAllocationRequest->itemsToMakeLostCount;
7118  }
7119  else
7120  {
7121  return false;
7122  }
7123  }
7124 
7125  VMA_HEAVY_ASSERT(Validate());
7126  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7127  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7128 
7129  return true;
7130 }
7131 
7132 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7133 {
7134  uint32_t lostAllocationCount = 0;
7135  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7136  it != m_Suballocations.end();
7137  ++it)
7138  {
7139  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7140  it->hAllocation->CanBecomeLost() &&
7141  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7142  {
7143  it = FreeSuballocation(it);
7144  ++lostAllocationCount;
7145  }
7146  }
7147  return lostAllocationCount;
7148 }
7149 
7150 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7151 {
7152  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7153  it != m_Suballocations.end();
7154  ++it)
7155  {
7156  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7157  {
7158  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7159  {
7160  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7161  return VK_ERROR_VALIDATION_FAILED_EXT;
7162  }
7163  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7164  {
7165  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7166  return VK_ERROR_VALIDATION_FAILED_EXT;
7167  }
7168  }
7169  }
7170 
7171  return VK_SUCCESS;
7172 }
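
// CheckCorruption() depends on every used suballocation being surrounded by
// VMA_DEBUG_MARGIN bytes pre-filled with a magic pattern. A simplified sketch
// of the matching write/validate pair (illustration only; the header's own
// helpers are VmaWriteMagicValue() and VmaValidateMagicValue(), and the exact
// pattern value used here is assumed):
static const uint32_t MAGIC_SKETCH = 0x7F84E666;

static void WriteMagicSketch(void* pBlockData, size_t offset, size_t marginBytes)
{
    uint32_t* p = (uint32_t*)((char*)pBlockData + offset);
    for(size_t i = 0; i < marginBytes / sizeof(uint32_t); ++i)
    {
        p[i] = MAGIC_SKETCH; // Written when the neighboring allocation is created.
    }
}

static bool ValidateMagicSketch(const void* pBlockData, size_t offset, size_t marginBytes)
{
    const uint32_t* p = (const uint32_t*)((const char*)pBlockData + offset);
    for(size_t i = 0; i < marginBytes / sizeof(uint32_t); ++i)
    {
        if(p[i] != MAGIC_SKETCH)
        {
            return false; // Some write ran past an allocation boundary.
        }
    }
    return true;
}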
7173 
7174 void VmaBlockMetadata_Generic::Alloc(
7175  const VmaAllocationRequest& request,
7176  VmaSuballocationType type,
7177  VkDeviceSize allocSize,
7178  bool upperAddress,
7179  VmaAllocation hAllocation)
7180 {
7181  VMA_ASSERT(!upperAddress);
7182  VMA_ASSERT(request.item != m_Suballocations.end());
7183  VmaSuballocation& suballoc = *request.item;
7184  // Given suballocation is a free block.
7185  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7186  // Given offset is inside this suballocation.
7187  VMA_ASSERT(request.offset >= suballoc.offset);
7188  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7189  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7190  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7191 
7192  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7193  // it to become used.
7194  UnregisterFreeSuballocation(request.item);
7195 
7196  suballoc.offset = request.offset;
7197  suballoc.size = allocSize;
7198  suballoc.type = type;
7199  suballoc.hAllocation = hAllocation;
7200 
7201  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7202  if(paddingEnd)
7203  {
7204  VmaSuballocation paddingSuballoc = {};
7205  paddingSuballoc.offset = request.offset + allocSize;
7206  paddingSuballoc.size = paddingEnd;
7207  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7208  VmaSuballocationList::iterator next = request.item;
7209  ++next;
7210  const VmaSuballocationList::iterator paddingEndItem =
7211  m_Suballocations.insert(next, paddingSuballoc);
7212  RegisterFreeSuballocation(paddingEndItem);
7213  }
7214 
7215  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7216  if(paddingBegin)
7217  {
7218  VmaSuballocation paddingSuballoc = {};
7219  paddingSuballoc.offset = request.offset - paddingBegin;
7220  paddingSuballoc.size = paddingBegin;
7221  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7222  const VmaSuballocationList::iterator paddingBeginItem =
7223  m_Suballocations.insert(request.item, paddingSuballoc);
7224  RegisterFreeSuballocation(paddingBeginItem);
7225  }
7226 
7227  // Update totals.
7228  m_FreeCount = m_FreeCount - 1;
7229  if(paddingBegin > 0)
7230  {
7231  ++m_FreeCount;
7232  }
7233  if(paddingEnd > 0)
7234  {
7235  ++m_FreeCount;
7236  }
7237  m_SumFreeSize -= allocSize;
7238 }
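
// Worked example of the split performed by Alloc(), with hypothetical numbers:
// given a FREE suballocation at offset 1000 with size 500 (range [1000, 1500)),
// and a request with request.offset = 1024, allocSize = 400:
//   paddingBegin = 1024 - 1000 = 24    -> new FREE item [1000, 1024)
//   the item itself becomes used       -> [1024, 1424)
//   paddingEnd = 500 - 24 - 400 = 76   -> new FREE item [1424, 1500)
// m_FreeCount changes by -1 (consumed) + 1 (begin) + 1 (end) = net +1, and
// m_SumFreeSize decreases by exactly allocSize = 400.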
7239 
7240 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7241 {
7242  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7243  suballocItem != m_Suballocations.end();
7244  ++suballocItem)
7245  {
7246  VmaSuballocation& suballoc = *suballocItem;
7247  if(suballoc.hAllocation == allocation)
7248  {
7249  FreeSuballocation(suballocItem);
7250  VMA_HEAVY_ASSERT(Validate());
7251  return;
7252  }
7253  }
7254  VMA_ASSERT(0 && "Not found!");
7255 }
7256 
7257 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7258 {
7259  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7260  suballocItem != m_Suballocations.end();
7261  ++suballocItem)
7262  {
7263  VmaSuballocation& suballoc = *suballocItem;
7264  if(suballoc.offset == offset)
7265  {
7266  FreeSuballocation(suballocItem);
7267  return;
7268  }
7269  }
7270  VMA_ASSERT(0 && "Not found!");
7271 }
7272 
7273 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
7274 {
7275  typedef VmaSuballocationList::iterator iter_type;
7276  for(iter_type suballocItem = m_Suballocations.begin();
7277  suballocItem != m_Suballocations.end();
7278  ++suballocItem)
7279  {
7280  VmaSuballocation& suballoc = *suballocItem;
7281  if(suballoc.hAllocation == alloc)
7282  {
7283  iter_type nextItem = suballocItem;
7284  ++nextItem;
7285 
7286  // Should have been ensured at a higher level.
7287  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
7288 
7289  // Shrinking.
7290  if(newSize < alloc->GetSize())
7291  {
7292  const VkDeviceSize sizeDiff = suballoc.size - newSize;
7293 
7294  // There is next item.
7295  if(nextItem != m_Suballocations.end())
7296  {
7297  // Next item is free.
7298  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7299  {
7300  // Grow this next item backward.
7301  UnregisterFreeSuballocation(nextItem);
7302  nextItem->offset -= sizeDiff;
7303  nextItem->size += sizeDiff;
7304  RegisterFreeSuballocation(nextItem);
7305  }
7306  // Next item is not free.
7307  else
7308  {
7309  // Create free item after current one.
7310  VmaSuballocation newFreeSuballoc;
7311  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
7312  newFreeSuballoc.offset = suballoc.offset + newSize;
7313  newFreeSuballoc.size = sizeDiff;
7314  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7315  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
7316  RegisterFreeSuballocation(newFreeSuballocIt);
7317 
7318  ++m_FreeCount;
7319  }
7320  }
7321  // This is the last item.
7322  else
7323  {
7324  // Create free item at the end.
7325  VmaSuballocation newFreeSuballoc;
7326  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
7327  newFreeSuballoc.offset = suballoc.offset + newSize;
7328  newFreeSuballoc.size = sizeDiff;
7329  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7330  m_Suballocations.push_back(newFreeSuballoc);
7331 
7332  iter_type newFreeSuballocIt = m_Suballocations.end();
7333  RegisterFreeSuballocation(--newFreeSuballocIt);
7334 
7335  ++m_FreeCount;
7336  }
7337 
7338  suballoc.size = newSize;
7339  m_SumFreeSize += sizeDiff;
7340  }
7341  // Growing.
7342  else
7343  {
7344  const VkDeviceSize sizeDiff = newSize - suballoc.size;
7345 
7346  // There is next item.
7347  if(nextItem != m_Suballocations.end())
7348  {
7349  // Next item is free.
7350  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7351  {
7352  // There is not enough free space, including margin.
7353  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
7354  {
7355  return false;
7356  }
7357 
7358  // There is more free space than required.
7359  if(nextItem->size > sizeDiff)
7360  {
7361  // Move and shrink this next item.
7362  UnregisterFreeSuballocation(nextItem);
7363  nextItem->offset += sizeDiff;
7364  nextItem->size -= sizeDiff;
7365  RegisterFreeSuballocation(nextItem);
7366  }
7367  // There is exactly the amount of free space required.
7368  else
7369  {
7370  // Remove this next free item.
7371  UnregisterFreeSuballocation(nextItem);
7372  m_Suballocations.erase(nextItem);
7373  --m_FreeCount;
7374  }
7375  }
7376  // Next item is not free - there is no space to grow.
7377  else
7378  {
7379  return false;
7380  }
7381  }
7382  // This is the last item - there is no space to grow.
7383  else
7384  {
7385  return false;
7386  }
7387 
7388  suballoc.size = newSize;
7389  m_SumFreeSize -= sizeDiff;
7390  }
7391 
7392  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
7393  return true;
7394  }
7395  }
7396  VMA_ASSERT(0 && "Not found!");
7397  return false;
7398 }
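
// Worked example of the two in-place resize paths above, with hypothetical
// numbers. Starting layout: [USED offset=0 size=100][FREE offset=100 size=50]:
//   Shrink 100 -> 80: sizeDiff = 20; the FREE neighbor grows backward to
//     offset=80, size=70; m_SumFreeSize += 20.
//   Grow 100 -> 140: sizeDiff = 40; requires 50 >= 40 + VMA_DEBUG_MARGIN; the
//     FREE neighbor moves and shrinks to offset=140, size=10; m_SumFreeSize -= 40.
//   Grow 100 -> 160: sizeDiff = 60 > 50, so the function returns false and the
//     caller must fall back to a full reallocation.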
7399 
7400 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7401 {
7402  VkDeviceSize lastSize = 0;
7403  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7404  {
7405  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7406 
7407  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7408  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7409  VMA_VALIDATE(it->size >= lastSize);
7410  lastSize = it->size;
7411  }
7412  return true;
7413 }
7414 
7415 bool VmaBlockMetadata_Generic::CheckAllocation(
7416  uint32_t currentFrameIndex,
7417  uint32_t frameInUseCount,
7418  VkDeviceSize bufferImageGranularity,
7419  VkDeviceSize allocSize,
7420  VkDeviceSize allocAlignment,
7421  VmaSuballocationType allocType,
7422  VmaSuballocationList::const_iterator suballocItem,
7423  bool canMakeOtherLost,
7424  VkDeviceSize* pOffset,
7425  size_t* itemsToMakeLostCount,
7426  VkDeviceSize* pSumFreeSize,
7427  VkDeviceSize* pSumItemSize) const
7428 {
7429  VMA_ASSERT(allocSize > 0);
7430  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7431  VMA_ASSERT(suballocItem != m_Suballocations.cend());
7432  VMA_ASSERT(pOffset != VMA_NULL);
7433 
7434  *itemsToMakeLostCount = 0;
7435  *pSumFreeSize = 0;
7436  *pSumItemSize = 0;
7437 
7438  if(canMakeOtherLost)
7439  {
7440  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7441  {
7442  *pSumFreeSize = suballocItem->size;
7443  }
7444  else
7445  {
7446  if(suballocItem->hAllocation->CanBecomeLost() &&
7447  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7448  {
7449  ++*itemsToMakeLostCount;
7450  *pSumItemSize = suballocItem->size;
7451  }
7452  else
7453  {
7454  return false;
7455  }
7456  }
7457 
7458  // Remaining size is too small for this request: Early return.
7459  if(GetSize() - suballocItem->offset < allocSize)
7460  {
7461  return false;
7462  }
7463 
7464  // Start from offset equal to beginning of this suballocation.
7465  *pOffset = suballocItem->offset;
7466 
7467  // Apply VMA_DEBUG_MARGIN at the beginning.
7468  if(VMA_DEBUG_MARGIN > 0)
7469  {
7470  *pOffset += VMA_DEBUG_MARGIN;
7471  }
7472 
7473  // Apply alignment.
7474  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7475 
7476  // Check previous suballocations for BufferImageGranularity conflicts.
7477  // Make bigger alignment if necessary.
7478  if(bufferImageGranularity > 1)
7479  {
7480  bool bufferImageGranularityConflict = false;
7481  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7482  while(prevSuballocItem != m_Suballocations.cbegin())
7483  {
7484  --prevSuballocItem;
7485  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7486  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7487  {
7488  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7489  {
7490  bufferImageGranularityConflict = true;
7491  break;
7492  }
7493  }
7494  else
7495  // Already on previous page.
7496  break;
7497  }
7498  if(bufferImageGranularityConflict)
7499  {
7500  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7501  }
7502  }
7503 
7504  // Now that we have final *pOffset, check if we are past suballocItem.
7505  // If yes, return false - this function should be called for another suballocItem as starting point.
7506  if(*pOffset >= suballocItem->offset + suballocItem->size)
7507  {
7508  return false;
7509  }
7510 
7511  // Calculate padding at the beginning based on current offset.
7512  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
7513 
7514  // Calculate required margin at the end.
7515  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7516 
7517  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
7518  // Another early return check.
7519  if(suballocItem->offset + totalSize > GetSize())
7520  {
7521  return false;
7522  }
7523 
7524  // Advance lastSuballocItem until desired size is reached.
7525  // Update itemsToMakeLostCount.
7526  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
7527  if(totalSize > suballocItem->size)
7528  {
7529  VkDeviceSize remainingSize = totalSize - suballocItem->size;
7530  while(remainingSize > 0)
7531  {
7532  ++lastSuballocItem;
7533  if(lastSuballocItem == m_Suballocations.cend())
7534  {
7535  return false;
7536  }
7537  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7538  {
7539  *pSumFreeSize += lastSuballocItem->size;
7540  }
7541  else
7542  {
7543  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
7544  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
7545  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7546  {
7547  ++*itemsToMakeLostCount;
7548  *pSumItemSize += lastSuballocItem->size;
7549  }
7550  else
7551  {
7552  return false;
7553  }
7554  }
7555  remainingSize = (lastSuballocItem->size < remainingSize) ?
7556  remainingSize - lastSuballocItem->size : 0;
7557  }
7558  }
7559 
7560  // Check next suballocations for BufferImageGranularity conflicts.
7561  // If conflict exists, we must mark more allocations lost or fail.
7562  if(bufferImageGranularity > 1)
7563  {
7564  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
7565  ++nextSuballocItem;
7566  while(nextSuballocItem != m_Suballocations.cend())
7567  {
7568  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7569  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7570  {
7571  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7572  {
7573  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
7574  if(nextSuballoc.hAllocation->CanBecomeLost() &&
7575  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7576  {
7577  ++*itemsToMakeLostCount;
7578  }
7579  else
7580  {
7581  return false;
7582  }
7583  }
7584  }
7585  else
7586  {
7587  // Already on next page.
7588  break;
7589  }
7590  ++nextSuballocItem;
7591  }
7592  }
7593  }
7594  else
7595  {
7596  const VmaSuballocation& suballoc = *suballocItem;
7597  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7598 
7599  *pSumFreeSize = suballoc.size;
7600 
7601  // Size of this suballocation is too small for this request: Early return.
7602  if(suballoc.size < allocSize)
7603  {
7604  return false;
7605  }
7606 
7607  // Start from offset equal to beginning of this suballocation.
7608  *pOffset = suballoc.offset;
7609 
7610  // Apply VMA_DEBUG_MARGIN at the beginning.
7611  if(VMA_DEBUG_MARGIN > 0)
7612  {
7613  *pOffset += VMA_DEBUG_MARGIN;
7614  }
7615 
7616  // Apply alignment.
7617  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7618 
7619  // Check previous suballocations for BufferImageGranularity conflicts.
7620  // Make bigger alignment if necessary.
7621  if(bufferImageGranularity > 1)
7622  {
7623  bool bufferImageGranularityConflict = false;
7624  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7625  while(prevSuballocItem != m_Suballocations.cbegin())
7626  {
7627  --prevSuballocItem;
7628  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7629  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7630  {
7631  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7632  {
7633  bufferImageGranularityConflict = true;
7634  break;
7635  }
7636  }
7637  else
7638  // Already on previous page.
7639  break;
7640  }
7641  if(bufferImageGranularityConflict)
7642  {
7643  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7644  }
7645  }
7646 
7647  // Calculate padding at the beginning based on current offset.
7648  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
7649 
7650  // Calculate required margin at the end.
7651  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7652 
7653  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
7654  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
7655  {
7656  return false;
7657  }
7658 
7659  // Check next suballocations for BufferImageGranularity conflicts.
7660  // If conflict exists, allocation cannot be made here.
7661  if(bufferImageGranularity > 1)
7662  {
7663  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7664  ++nextSuballocItem;
7665  while(nextSuballocItem != m_Suballocations.cend())
7666  {
7667  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7668  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7669  {
7670  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7671  {
7672  return false;
7673  }
7674  }
7675  else
7676  {
7677  // Already on next page.
7678  break;
7679  }
7680  ++nextSuballocItem;
7681  }
7682  }
7683  }
7684 
7685  // All tests passed: Success. pOffset is already filled.
7686  return true;
7687 }
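
// Worked example of the offset computation above, with hypothetical numbers:
// suballoc.offset = 1000, suballoc.size = 600, allocSize = 400,
// allocAlignment = 256, VMA_DEBUG_MARGIN = 16.
//   *pOffset = 1000 + 16 = 1016, then VmaAlignUp(1016, 256) = 1024.
//   paddingBegin = 1024 - 1000 = 24.
//   24 + 400 + 16 = 440 <= 600, so the request fits and true is returned.
// With allocSize = 600 the same numbers would fail: 24 + 600 + 16 = 640 > 600.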
7688 
7689 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7690 {
7691  VMA_ASSERT(item != m_Suballocations.end());
7692  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7693 
7694  VmaSuballocationList::iterator nextItem = item;
7695  ++nextItem;
7696  VMA_ASSERT(nextItem != m_Suballocations.end());
7697  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7698 
7699  item->size += nextItem->size;
7700  --m_FreeCount;
7701  m_Suballocations.erase(nextItem);
7702 }
7703 
7704 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7705 {
7706  // Change this suballocation to be marked as free.
7707  VmaSuballocation& suballoc = *suballocItem;
7708  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7709  suballoc.hAllocation = VK_NULL_HANDLE;
7710 
7711  // Update totals.
7712  ++m_FreeCount;
7713  m_SumFreeSize += suballoc.size;
7714 
7715  // Merge with previous and/or next suballocation if it's also free.
7716  bool mergeWithNext = false;
7717  bool mergeWithPrev = false;
7718 
7719  VmaSuballocationList::iterator nextItem = suballocItem;
7720  ++nextItem;
7721  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7722  {
7723  mergeWithNext = true;
7724  }
7725 
7726  VmaSuballocationList::iterator prevItem = suballocItem;
7727  if(suballocItem != m_Suballocations.begin())
7728  {
7729  --prevItem;
7730  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7731  {
7732  mergeWithPrev = true;
7733  }
7734  }
7735 
7736  if(mergeWithNext)
7737  {
7738  UnregisterFreeSuballocation(nextItem);
7739  MergeFreeWithNext(suballocItem);
7740  }
7741 
7742  if(mergeWithPrev)
7743  {
7744  UnregisterFreeSuballocation(prevItem);
7745  MergeFreeWithNext(prevItem);
7746  RegisterFreeSuballocation(prevItem);
7747  return prevItem;
7748  }
7749  else
7750  {
7751  RegisterFreeSuballocation(suballocItem);
7752  return suballocItem;
7753  }
7754 }
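
// Example of the merge cases handled above (offsets hypothetical). Freeing the
// middle item of [FREE 0..100][USED 100..200][FREE 200..300]:
//   1. The item becomes FREE, so both mergeWithNext and mergeWithPrev are true.
//   2. The next item is unregistered and absorbed: [FREE 100..300].
//   3. The previous item is unregistered, absorbs that range, and is
//      re-registered: [FREE 0..300], which is returned.
// m_FreeCount changes by +1 (freed) - 1 (next) - 1 (prev) = net -1, matching
// the drop from two FREE items to one.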
7755 
7756 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7757 {
7758  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7759  VMA_ASSERT(item->size > 0);
7760 
7761  // You may want to enable this validation at the beginning or at the end of
7762  // this function, depending on what you want to check.
7763  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7764 
7765  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7766  {
7767  if(m_FreeSuballocationsBySize.empty())
7768  {
7769  m_FreeSuballocationsBySize.push_back(item);
7770  }
7771  else
7772  {
7773  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7774  }
7775  }
7776 
7777  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7778 }
7779 
7780 
7781 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7782 {
7783  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7784  VMA_ASSERT(item->size > 0);
7785 
7786  // You may want to enable this validation at the beginning or at the end of
7787  // this function, depending on what you want to check.
7788  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7789 
7790  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7791  {
7792  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7793  m_FreeSuballocationsBySize.data(),
7794  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7795  item,
7796  VmaSuballocationItemSizeLess());
7797  for(size_t index = it - m_FreeSuballocationsBySize.data();
7798  index < m_FreeSuballocationsBySize.size();
7799  ++index)
7800  {
7801  if(m_FreeSuballocationsBySize[index] == item)
7802  {
7803  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7804  return;
7805  }
7806  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7807  }
7808  VMA_ASSERT(0 && "Not found.");
7809  }
7810 
7811  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7812 }
7813 
7814 ////////////////////////////////////////////////////////////////////////////////
7815 // class VmaBlockMetadata_Linear
7816 
7817 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
7818  VmaBlockMetadata(hAllocator),
7819  m_SumFreeSize(0),
7820  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7821  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7822  m_1stVectorIndex(0),
7823  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7824  m_1stNullItemsBeginCount(0),
7825  m_1stNullItemsMiddleCount(0),
7826  m_2ndNullItemsCount(0)
7827 {
7828 }
7829 
7830 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
7831 {
7832 }
7833 
7834 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7835 {
7836  VmaBlockMetadata::Init(size);
7837  m_SumFreeSize = size;
7838 }
7839 
7840 bool VmaBlockMetadata_Linear::Validate() const
7841 {
7842  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7843  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7844 
7845  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7846  VMA_VALIDATE(!suballocations1st.empty() ||
7847  suballocations2nd.empty() ||
7848  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7849 
7850  if(!suballocations1st.empty())
7851  {
7852  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
7853  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
7854  // A null item at the end should have been removed by pop_back().
7855  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
7856  }
7857  if(!suballocations2nd.empty())
7858  {
7859  // A null item at the end should have been removed by pop_back().
7860  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
7861  }
7862 
7863  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7864  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7865 
7866  VkDeviceSize sumUsedSize = 0;
7867  const size_t suballoc1stCount = suballocations1st.size();
7868  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7869 
7870  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7871  {
7872  const size_t suballoc2ndCount = suballocations2nd.size();
7873  size_t nullItem2ndCount = 0;
7874  for(size_t i = 0; i < suballoc2ndCount; ++i)
7875  {
7876  const VmaSuballocation& suballoc = suballocations2nd[i];
7877  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7878 
7879  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7880  VMA_VALIDATE(suballoc.offset >= offset);
7881 
7882  if(!currFree)
7883  {
7884  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7885  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7886  sumUsedSize += suballoc.size;
7887  }
7888  else
7889  {
7890  ++nullItem2ndCount;
7891  }
7892 
7893  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7894  }
7895 
7896  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7897  }
7898 
7899  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7900  {
7901  const VmaSuballocation& suballoc = suballocations1st[i];
7902  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7903  suballoc.hAllocation == VK_NULL_HANDLE);
7904  }
7905 
7906  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7907 
7908  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7909  {
7910  const VmaSuballocation& suballoc = suballocations1st[i];
7911  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7912 
7913  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7914  VMA_VALIDATE(suballoc.offset >= offset);
7915  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7916 
7917  if(!currFree)
7918  {
7919  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7920  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7921  sumUsedSize += suballoc.size;
7922  }
7923  else
7924  {
7925  ++nullItem1stCount;
7926  }
7927 
7928  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7929  }
7930  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7931 
7932  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7933  {
7934  const size_t suballoc2ndCount = suballocations2nd.size();
7935  size_t nullItem2ndCount = 0;
7936  for(size_t i = suballoc2ndCount; i--; )
7937  {
7938  const VmaSuballocation& suballoc = suballocations2nd[i];
7939  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7940 
7941  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7942  VMA_VALIDATE(suballoc.offset >= offset);
7943 
7944  if(!currFree)
7945  {
7946  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7947  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7948  sumUsedSize += suballoc.size;
7949  }
7950  else
7951  {
7952  ++nullItem2ndCount;
7953  }
7954 
7955  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7956  }
7957 
7958  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7959  }
7960 
7961  VMA_VALIDATE(offset <= GetSize());
7962  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7963 
7964  return true;
7965 }
7966 
7967 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7968 {
7969  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7970  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7971 }
7972 
7973 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7974 {
7975  const VkDeviceSize size = GetSize();
7976 
7977  /*
7978  We don't consider gaps inside allocation vectors with freed allocations because
7979  they are not suitable for reuse in a linear allocator. We consider only space that
7980  is available for new allocations.
7981  */
7982  if(IsEmpty())
7983  {
7984  return size;
7985  }
7986 
7987  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7988 
7989  switch(m_2ndVectorMode)
7990  {
7991  case SECOND_VECTOR_EMPTY:
7992  /*
7993  Available space is after end of 1st, as well as before beginning of 1st (which
7994  would make it a ring buffer).
7995  */
7996  {
7997  const size_t suballocations1stCount = suballocations1st.size();
7998  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
7999  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8000  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8001  return VMA_MAX(
8002  firstSuballoc.offset,
8003  size - (lastSuballoc.offset + lastSuballoc.size));
8004  }
8005  break;
8006 
8007  case SECOND_VECTOR_RING_BUFFER:
8008  /*
8009  Available space is only between end of 2nd and beginning of 1st.
8010  */
8011  {
8012  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8013  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8014  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8015  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8016  }
8017  break;
8018 
8019  case SECOND_VECTOR_DOUBLE_STACK:
8020  /*
8021  Available space is only between end of 1st and top of 2nd.
8022  */
8023  {
8024  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8025  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8026  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8027  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8028  }
8029  break;
8030 
8031  default:
8032  VMA_ASSERT(0);
8033  return 0;
8034  }
8035 }
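
// Layout of the three modes handled above, block drawn left to right:
//   SECOND_VECTOR_EMPTY:        [space][1st allocations][space]
//     free = max(space before 1st, space after 1st)
//   SECOND_VECTOR_RING_BUFFER:  [2nd allocations][space][1st allocations]
//     free = gap between end of 2nd and beginning of 1st
//   SECOND_VECTOR_DOUBLE_STACK: [1st allocations][space][2nd allocations]
//     free = gap between end of 1st and top of 2nd (which grows downward)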
8036 
8037 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8038 {
8039  const VkDeviceSize size = GetSize();
8040  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8041  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8042  const size_t suballoc1stCount = suballocations1st.size();
8043  const size_t suballoc2ndCount = suballocations2nd.size();
8044 
8045  outInfo.blockCount = 1;
8046  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8047  outInfo.unusedRangeCount = 0;
8048  outInfo.usedBytes = 0;
8049  outInfo.allocationSizeMin = UINT64_MAX;
8050  outInfo.allocationSizeMax = 0;
8051  outInfo.unusedRangeSizeMin = UINT64_MAX;
8052  outInfo.unusedRangeSizeMax = 0;
8053 
8054  VkDeviceSize lastOffset = 0;
8055 
8056  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8057  {
8058  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8059  size_t nextAlloc2ndIndex = 0;
8060  while(lastOffset < freeSpace2ndTo1stEnd)
8061  {
8062  // Find next non-null allocation or move nextAllocIndex to the end.
8063  while(nextAlloc2ndIndex < suballoc2ndCount &&
8064  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8065  {
8066  ++nextAlloc2ndIndex;
8067  }
8068 
8069  // Found non-null allocation.
8070  if(nextAlloc2ndIndex < suballoc2ndCount)
8071  {
8072  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8073 
8074  // 1. Process free space before this allocation.
8075  if(lastOffset < suballoc.offset)
8076  {
8077  // There is free space from lastOffset to suballoc.offset.
8078  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8079  ++outInfo.unusedRangeCount;
8080  outInfo.unusedBytes += unusedRangeSize;
8081  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8082  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8083  }
8084 
8085  // 2. Process this allocation.
8086  // There is allocation with suballoc.offset, suballoc.size.
8087  outInfo.usedBytes += suballoc.size;
8088  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8089  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8090 
8091  // 3. Prepare for next iteration.
8092  lastOffset = suballoc.offset + suballoc.size;
8093  ++nextAlloc2ndIndex;
8094  }
8095  // We are at the end.
8096  else
8097  {
8098  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8099  if(lastOffset < freeSpace2ndTo1stEnd)
8100  {
8101  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8102  ++outInfo.unusedRangeCount;
8103  outInfo.unusedBytes += unusedRangeSize;
8104  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8105  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8106  }
8107 
8108  // End of loop.
8109  lastOffset = freeSpace2ndTo1stEnd;
8110  }
8111  }
8112  }
8113 
8114  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8115  const VkDeviceSize freeSpace1stTo2ndEnd =
8116  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8117  while(lastOffset < freeSpace1stTo2ndEnd)
8118  {
8119  // Find next non-null allocation or move nextAllocIndex to the end.
8120  while(nextAlloc1stIndex < suballoc1stCount &&
8121  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8122  {
8123  ++nextAlloc1stIndex;
8124  }
8125 
8126  // Found non-null allocation.
8127  if(nextAlloc1stIndex < suballoc1stCount)
8128  {
8129  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8130 
8131  // 1. Process free space before this allocation.
8132  if(lastOffset < suballoc.offset)
8133  {
8134  // There is free space from lastOffset to suballoc.offset.
8135  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8136  ++outInfo.unusedRangeCount;
8137  outInfo.unusedBytes += unusedRangeSize;
8138  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8139  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8140  }
8141 
8142  // 2. Process this allocation.
8143  // There is allocation with suballoc.offset, suballoc.size.
8144  outInfo.usedBytes += suballoc.size;
8145  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8146  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8147 
8148  // 3. Prepare for next iteration.
8149  lastOffset = suballoc.offset + suballoc.size;
8150  ++nextAlloc1stIndex;
8151  }
8152  // We are at the end.
8153  else
8154  {
8155  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8156  if(lastOffset < freeSpace1stTo2ndEnd)
8157  {
8158  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8159  ++outInfo.unusedRangeCount;
8160  outInfo.unusedBytes += unusedRangeSize;
8161  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8162  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8163  }
8164 
8165  // End of loop.
8166  lastOffset = freeSpace1stTo2ndEnd;
8167  }
8168  }
8169 
8170  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8171  {
8172  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8173  while(lastOffset < size)
8174  {
8175  // Find next non-null allocation or move nextAllocIndex to the end.
8176  while(nextAlloc2ndIndex != SIZE_MAX &&
8177  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8178  {
8179  --nextAlloc2ndIndex;
8180  }
8181 
8182  // Found non-null allocation.
8183  if(nextAlloc2ndIndex != SIZE_MAX)
8184  {
8185  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8186 
8187  // 1. Process free space before this allocation.
8188  if(lastOffset < suballoc.offset)
8189  {
8190  // There is free space from lastOffset to suballoc.offset.
8191  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8192  ++outInfo.unusedRangeCount;
8193  outInfo.unusedBytes += unusedRangeSize;
8194  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8195  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8196  }
8197 
8198  // 2. Process this allocation.
8199  // There is allocation with suballoc.offset, suballoc.size.
8200  outInfo.usedBytes += suballoc.size;
8201  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8202  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8203 
8204  // 3. Prepare for next iteration.
8205  lastOffset = suballoc.offset + suballoc.size;
8206  --nextAlloc2ndIndex;
8207  }
8208  // We are at the end.
8209  else
8210  {
8211  // There is free space from lastOffset to size.
8212  if(lastOffset < size)
8213  {
8214  const VkDeviceSize unusedRangeSize = size - lastOffset;
8215  ++outInfo.unusedRangeCount;
8216  outInfo.unusedBytes += unusedRangeSize;
8217  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8218  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8219  }
8220 
8221  // End of loop.
8222  lastOffset = size;
8223  }
8224  }
8225  }
8226 
8227  outInfo.unusedBytes = size - outInfo.usedBytes;
8228 }
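// Layout sketch (informal, inferred from the loops in the statistics functions
// of this class; not part of the original comments): the block is walked as up
// to three consecutive regions. In SECOND_VECTOR_RING_BUFFER mode:
//
//   | 2nd vector (wrapped-around part) | 1st vector | free |
//   0                                  ^freeSpace2ndTo1stEnd
//
// In SECOND_VECTOR_DOUBLE_STACK mode:
//
//   | 1st vector | free | 2nd vector (upper stack, grows downward) |
//   0                   ^freeSpace1stTo2ndEnd                      ^size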
8229 
8230 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8231 {
8232  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8233  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8234  const VkDeviceSize size = GetSize();
8235  const size_t suballoc1stCount = suballocations1st.size();
8236  const size_t suballoc2ndCount = suballocations2nd.size();
8237 
8238  inoutStats.size += size;
8239 
8240  VkDeviceSize lastOffset = 0;
8241 
8242  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8243  {
8244  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8245  size_t nextAlloc2ndIndex = 0;
8246  while(lastOffset < freeSpace2ndTo1stEnd)
8247  {
8248  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8249  while(nextAlloc2ndIndex < suballoc2ndCount &&
8250  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8251  {
8252  ++nextAlloc2ndIndex;
8253  }
8254 
8255  // Found non-null allocation.
8256  if(nextAlloc2ndIndex < suballoc2ndCount)
8257  {
8258  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8259 
8260  // 1. Process free space before this allocation.
8261  if(lastOffset < suballoc.offset)
8262  {
8263  // There is free space from lastOffset to suballoc.offset.
8264  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8265  inoutStats.unusedSize += unusedRangeSize;
8266  ++inoutStats.unusedRangeCount;
8267  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8268  }
8269 
8270  // 2. Process this allocation.
8271  // There is allocation with suballoc.offset, suballoc.size.
8272  ++inoutStats.allocationCount;
8273 
8274  // 3. Prepare for next iteration.
8275  lastOffset = suballoc.offset + suballoc.size;
8276  ++nextAlloc2ndIndex;
8277  }
8278  // We are at the end.
8279  else
8280  {
8281  if(lastOffset < freeSpace2ndTo1stEnd)
8282  {
8283  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8284  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8285  inoutStats.unusedSize += unusedRangeSize;
8286  ++inoutStats.unusedRangeCount;
8287  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8288  }
8289 
8290  // End of loop.
8291  lastOffset = freeSpace2ndTo1stEnd;
8292  }
8293  }
8294  }
8295 
8296  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8297  const VkDeviceSize freeSpace1stTo2ndEnd =
8298  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8299  while(lastOffset < freeSpace1stTo2ndEnd)
8300  {
8301  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8302  while(nextAlloc1stIndex < suballoc1stCount &&
8303  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8304  {
8305  ++nextAlloc1stIndex;
8306  }
8307 
8308  // Found non-null allocation.
8309  if(nextAlloc1stIndex < suballoc1stCount)
8310  {
8311  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8312 
8313  // 1. Process free space before this allocation.
8314  if(lastOffset < suballoc.offset)
8315  {
8316  // There is free space from lastOffset to suballoc.offset.
8317  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8318  inoutStats.unusedSize += unusedRangeSize;
8319  ++inoutStats.unusedRangeCount;
8320  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8321  }
8322 
8323  // 2. Process this allocation.
8324  // There is allocation with suballoc.offset, suballoc.size.
8325  ++inoutStats.allocationCount;
8326 
8327  // 3. Prepare for next iteration.
8328  lastOffset = suballoc.offset + suballoc.size;
8329  ++nextAlloc1stIndex;
8330  }
8331  // We are at the end.
8332  else
8333  {
8334  if(lastOffset < freeSpace1stTo2ndEnd)
8335  {
8336  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8337  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8338  inoutStats.unusedSize += unusedRangeSize;
8339  ++inoutStats.unusedRangeCount;
8340  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8341  }
8342 
8343  // End of loop.
8344  lastOffset = freeSpace1stTo2ndEnd;
8345  }
8346  }
8347 
8348  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8349  {
8350  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8351  while(lastOffset < size)
8352  {
8353  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8354  while(nextAlloc2ndIndex != SIZE_MAX &&
8355  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8356  {
8357  --nextAlloc2ndIndex;
8358  }
8359 
8360  // Found non-null allocation.
8361  if(nextAlloc2ndIndex != SIZE_MAX)
8362  {
8363  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8364 
8365  // 1. Process free space before this allocation.
8366  if(lastOffset < suballoc.offset)
8367  {
8368  // There is free space from lastOffset to suballoc.offset.
8369  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8370  inoutStats.unusedSize += unusedRangeSize;
8371  ++inoutStats.unusedRangeCount;
8372  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8373  }
8374 
8375  // 2. Process this allocation.
8376  // There is allocation with suballoc.offset, suballoc.size.
8377  ++inoutStats.allocationCount;
8378 
8379  // 3. Prepare for next iteration.
8380  lastOffset = suballoc.offset + suballoc.size;
8381  --nextAlloc2ndIndex;
8382  }
8383  // We are at the end.
8384  else
8385  {
8386  if(lastOffset < size)
8387  {
8388  // There is free space from lastOffset to size.
8389  const VkDeviceSize unusedRangeSize = size - lastOffset;
8390  inoutStats.unusedSize += unusedRangeSize;
8391  ++inoutStats.unusedRangeCount;
8392  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8393  }
8394 
8395  // End of loop.
8396  lastOffset = size;
8397  }
8398  }
8399  }
8400 }
8401 
8402 #if VMA_STATS_STRING_ENABLED
8403 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8404 {
8405  const VkDeviceSize size = GetSize();
8406  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8407  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8408  const size_t suballoc1stCount = suballocations1st.size();
8409  const size_t suballoc2ndCount = suballocations2nd.size();
8410 
8411  // FIRST PASS
8412 
8413  size_t unusedRangeCount = 0;
8414  VkDeviceSize usedBytes = 0;
8415 
8416  VkDeviceSize lastOffset = 0;
8417 
8418  size_t alloc2ndCount = 0;
8419  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8420  {
8421  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8422  size_t nextAlloc2ndIndex = 0;
8423  while(lastOffset < freeSpace2ndTo1stEnd)
8424  {
8425  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8426  while(nextAlloc2ndIndex < suballoc2ndCount &&
8427  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8428  {
8429  ++nextAlloc2ndIndex;
8430  }
8431 
8432  // Found non-null allocation.
8433  if(nextAlloc2ndIndex < suballoc2ndCount)
8434  {
8435  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8436 
8437  // 1. Process free space before this allocation.
8438  if(lastOffset < suballoc.offset)
8439  {
8440  // There is free space from lastOffset to suballoc.offset.
8441  ++unusedRangeCount;
8442  }
8443 
8444  // 2. Process this allocation.
8445  // There is allocation with suballoc.offset, suballoc.size.
8446  ++alloc2ndCount;
8447  usedBytes += suballoc.size;
8448 
8449  // 3. Prepare for next iteration.
8450  lastOffset = suballoc.offset + suballoc.size;
8451  ++nextAlloc2ndIndex;
8452  }
8453  // We are at the end.
8454  else
8455  {
8456  if(lastOffset < freeSpace2ndTo1stEnd)
8457  {
8458  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8459  ++unusedRangeCount;
8460  }
8461 
8462  // End of loop.
8463  lastOffset = freeSpace2ndTo1stEnd;
8464  }
8465  }
8466  }
8467 
8468  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8469  size_t alloc1stCount = 0;
8470  const VkDeviceSize freeSpace1stTo2ndEnd =
8471  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8472  while(lastOffset < freeSpace1stTo2ndEnd)
8473  {
8474  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8475  while(nextAlloc1stIndex < suballoc1stCount &&
8476  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8477  {
8478  ++nextAlloc1stIndex;
8479  }
8480 
8481  // Found non-null allocation.
8482  if(nextAlloc1stIndex < suballoc1stCount)
8483  {
8484  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8485 
8486  // 1. Process free space before this allocation.
8487  if(lastOffset < suballoc.offset)
8488  {
8489  // There is free space from lastOffset to suballoc.offset.
8490  ++unusedRangeCount;
8491  }
8492 
8493  // 2. Process this allocation.
8494  // There is allocation with suballoc.offset, suballoc.size.
8495  ++alloc1stCount;
8496  usedBytes += suballoc.size;
8497 
8498  // 3. Prepare for next iteration.
8499  lastOffset = suballoc.offset + suballoc.size;
8500  ++nextAlloc1stIndex;
8501  }
8502  // We are at the end.
8503  else
8504  {
8505  if(lastOffset < freeSpace1stTo2ndEnd)
8506  {
8507  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8508  ++unusedRangeCount;
8509  }
8510 
8511  // End of loop.
8512  lastOffset = freeSpace1stTo2ndEnd;
8513  }
8514  }
8515 
8516  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8517  {
8518  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8519  while(lastOffset < size)
8520  {
8521  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8522  while(nextAlloc2ndIndex != SIZE_MAX &&
8523  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8524  {
8525  --nextAlloc2ndIndex;
8526  }
8527 
8528  // Found non-null allocation.
8529  if(nextAlloc2ndIndex != SIZE_MAX)
8530  {
8531  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8532 
8533  // 1. Process free space before this allocation.
8534  if(lastOffset < suballoc.offset)
8535  {
8536  // There is free space from lastOffset to suballoc.offset.
8537  ++unusedRangeCount;
8538  }
8539 
8540  // 2. Process this allocation.
8541  // There is allocation with suballoc.offset, suballoc.size.
8542  ++alloc2ndCount;
8543  usedBytes += suballoc.size;
8544 
8545  // 3. Prepare for next iteration.
8546  lastOffset = suballoc.offset + suballoc.size;
8547  --nextAlloc2ndIndex;
8548  }
8549  // We are at the end.
8550  else
8551  {
8552  if(lastOffset < size)
8553  {
8554  // There is free space from lastOffset to size.
8555  ++unusedRangeCount;
8556  }
8557 
8558  // End of loop.
8559  lastOffset = size;
8560  }
8561  }
8562  }
8563 
8564  const VkDeviceSize unusedBytes = size - usedBytes;
8565  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8566 
8567  // SECOND PASS
8568  lastOffset = 0;
8569 
8570  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8571  {
8572  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8573  size_t nextAlloc2ndIndex = 0;
8574  while(lastOffset < freeSpace2ndTo1stEnd)
8575  {
8576  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8577  while(nextAlloc2ndIndex < suballoc2ndCount &&
8578  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8579  {
8580  ++nextAlloc2ndIndex;
8581  }
8582 
8583  // Found non-null allocation.
8584  if(nextAlloc2ndIndex < suballoc2ndCount)
8585  {
8586  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8587 
8588  // 1. Process free space before this allocation.
8589  if(lastOffset < suballoc.offset)
8590  {
8591  // There is free space from lastOffset to suballoc.offset.
8592  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8593  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8594  }
8595 
8596  // 2. Process this allocation.
8597  // There is allocation with suballoc.offset, suballoc.size.
8598  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8599 
8600  // 3. Prepare for next iteration.
8601  lastOffset = suballoc.offset + suballoc.size;
8602  ++nextAlloc2ndIndex;
8603  }
8604  // We are at the end.
8605  else
8606  {
8607  if(lastOffset < freeSpace2ndTo1stEnd)
8608  {
8609  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8610  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8611  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8612  }
8613 
8614  // End of loop.
8615  lastOffset = freeSpace2ndTo1stEnd;
8616  }
8617  }
8618  }
8619 
8620  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8621  while(lastOffset < freeSpace1stTo2ndEnd)
8622  {
8623  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8624  while(nextAlloc1stIndex < suballoc1stCount &&
8625  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8626  {
8627  ++nextAlloc1stIndex;
8628  }
8629 
8630  // Found non-null allocation.
8631  if(nextAlloc1stIndex < suballoc1stCount)
8632  {
8633  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8634 
8635  // 1. Process free space before this allocation.
8636  if(lastOffset < suballoc.offset)
8637  {
8638  // There is free space from lastOffset to suballoc.offset.
8639  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8640  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8641  }
8642 
8643  // 2. Process this allocation.
8644  // There is allocation with suballoc.offset, suballoc.size.
8645  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8646 
8647  // 3. Prepare for next iteration.
8648  lastOffset = suballoc.offset + suballoc.size;
8649  ++nextAlloc1stIndex;
8650  }
8651  // We are at the end.
8652  else
8653  {
8654  if(lastOffset < freeSpace1stTo2ndEnd)
8655  {
8656  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8657  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8658  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8659  }
8660 
8661  // End of loop.
8662  lastOffset = freeSpace1stTo2ndEnd;
8663  }
8664  }
8665 
8666  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8667  {
8668  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8669  while(lastOffset < size)
8670  {
8671  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8672  while(nextAlloc2ndIndex != SIZE_MAX &&
8673  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8674  {
8675  --nextAlloc2ndIndex;
8676  }
8677 
8678  // Found non-null allocation.
8679  if(nextAlloc2ndIndex != SIZE_MAX)
8680  {
8681  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8682 
8683  // 1. Process free space before this allocation.
8684  if(lastOffset < suballoc.offset)
8685  {
8686  // There is free space from lastOffset to suballoc.offset.
8687  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8688  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8689  }
8690 
8691  // 2. Process this allocation.
8692  // There is allocation with suballoc.offset, suballoc.size.
8693  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8694 
8695  // 3. Prepare for next iteration.
8696  lastOffset = suballoc.offset + suballoc.size;
8697  --nextAlloc2ndIndex;
8698  }
8699  // We are at the end.
8700  else
8701  {
8702  if(lastOffset < size)
8703  {
8704  // There is free space from lastOffset to size.
8705  const VkDeviceSize unusedRangeSize = size - lastOffset;
8706  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8707  }
8708 
8709  // End of loop.
8710  lastOffset = size;
8711  }
8712  }
8713  }
8714 
8715  PrintDetailedMap_End(json);
8716 }
8717 #endif // #if VMA_STATS_STRING_ENABLED
8718 
8719 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8720  uint32_t currentFrameIndex,
8721  uint32_t frameInUseCount,
8722  VkDeviceSize bufferImageGranularity,
8723  VkDeviceSize allocSize,
8724  VkDeviceSize allocAlignment,
8725  bool upperAddress,
8726  VmaSuballocationType allocType,
8727  bool canMakeOtherLost,
8728  uint32_t strategy,
8729  VmaAllocationRequest* pAllocationRequest)
8730 {
8731  VMA_ASSERT(allocSize > 0);
8732  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8733  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8734  VMA_HEAVY_ASSERT(Validate());
8735 
8736  const VkDeviceSize size = GetSize();
8737  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8738  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8739 
8740  if(upperAddress)
8741  {
8742  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8743  {
8744  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8745  return false;
8746  }
8747 
8748  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8749  if(allocSize > size)
8750  {
8751  return false;
8752  }
8753  VkDeviceSize resultBaseOffset = size - allocSize;
8754  if(!suballocations2nd.empty())
8755  {
8756  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8757  resultBaseOffset = lastSuballoc.offset - allocSize;
8758  if(allocSize > lastSuballoc.offset)
8759  {
8760  return false;
8761  }
8762  }
8763 
8764  // Start from offset equal to end of free space.
8765  VkDeviceSize resultOffset = resultBaseOffset;
8766 
8767  // Apply VMA_DEBUG_MARGIN at the end.
8768  if(VMA_DEBUG_MARGIN > 0)
8769  {
8770  if(resultOffset < VMA_DEBUG_MARGIN)
8771  {
8772  return false;
8773  }
8774  resultOffset -= VMA_DEBUG_MARGIN;
8775  }
8776 
8777  // Apply alignment.
8778  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8779 
8780  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8781  // Make bigger alignment if necessary.
8782  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8783  {
8784  bool bufferImageGranularityConflict = false;
8785  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8786  {
8787  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8788  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8789  {
8790  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8791  {
8792  bufferImageGranularityConflict = true;
8793  break;
8794  }
8795  }
8796  else
8797  // Already on previous page.
8798  break;
8799  }
8800  if(bufferImageGranularityConflict)
8801  {
8802  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8803  }
8804  }
8805 
8806  // There is enough free space.
8807  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8808  suballocations1st.back().offset + suballocations1st.back().size :
8809  0;
8810  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8811  {
8812  // Check previous suballocations for BufferImageGranularity conflicts.
8813  // If conflict exists, allocation cannot be made here.
8814  if(bufferImageGranularity > 1)
8815  {
8816  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8817  {
8818  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8819  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8820  {
8821  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8822  {
8823  return false;
8824  }
8825  }
8826  else
8827  {
8828  // Already on next page.
8829  break;
8830  }
8831  }
8832  }
8833 
8834  // All tests passed: Success.
8835  pAllocationRequest->offset = resultOffset;
8836  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8837  pAllocationRequest->sumItemSize = 0;
8838  // pAllocationRequest->item unused.
8839  pAllocationRequest->itemsToMakeLostCount = 0;
8840  return true;
8841  }
8842  }
8843  else // !upperAddress
8844  {
8845  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8846  {
8847  // Try to allocate at the end of 1st vector.
8848 
8849  VkDeviceSize resultBaseOffset = 0;
8850  if(!suballocations1st.empty())
8851  {
8852  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8853  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8854  }
8855 
8856  // Start from offset equal to beginning of free space.
8857  VkDeviceSize resultOffset = resultBaseOffset;
8858 
8859  // Apply VMA_DEBUG_MARGIN at the beginning.
8860  if(VMA_DEBUG_MARGIN > 0)
8861  {
8862  resultOffset += VMA_DEBUG_MARGIN;
8863  }
8864 
8865  // Apply alignment.
8866  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8867 
8868  // Check previous suballocations for BufferImageGranularity conflicts.
8869  // Make bigger alignment if necessary.
8870  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8871  {
8872  bool bufferImageGranularityConflict = false;
8873  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8874  {
8875  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8876  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8877  {
8878  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8879  {
8880  bufferImageGranularityConflict = true;
8881  break;
8882  }
8883  }
8884  else
8885  // Already on previous page.
8886  break;
8887  }
8888  if(bufferImageGranularityConflict)
8889  {
8890  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8891  }
8892  }
8893 
8894  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8895  suballocations2nd.back().offset : size;
8896 
8897  // There is enough free space at the end after alignment.
8898  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8899  {
8900  // Check next suballocations for BufferImageGranularity conflicts.
8901  // If conflict exists, allocation cannot be made here.
8902  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8903  {
8904  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8905  {
8906  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8907  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8908  {
8909  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8910  {
8911  return false;
8912  }
8913  }
8914  else
8915  {
8916  // Already on previous page.
8917  break;
8918  }
8919  }
8920  }
8921 
8922  // All tests passed: Success.
8923  pAllocationRequest->offset = resultOffset;
8924  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8925  pAllocationRequest->sumItemSize = 0;
8926  // pAllocationRequest->item unused.
8927  pAllocationRequest->itemsToMakeLostCount = 0;
8928  return true;
8929  }
8930  }
8931 
8932  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
8933  // beginning of 1st vector as the end of free space.
8934  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8935  {
8936  VMA_ASSERT(!suballocations1st.empty());
8937 
8938  VkDeviceSize resultBaseOffset = 0;
8939  if(!suballocations2nd.empty())
8940  {
8941  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8942  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8943  }
8944 
8945  // Start from offset equal to beginning of free space.
8946  VkDeviceSize resultOffset = resultBaseOffset;
8947 
8948  // Apply VMA_DEBUG_MARGIN at the beginning.
8949  if(VMA_DEBUG_MARGIN > 0)
8950  {
8951  resultOffset += VMA_DEBUG_MARGIN;
8952  }
8953 
8954  // Apply alignment.
8955  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8956 
8957  // Check previous suballocations for BufferImageGranularity conflicts.
8958  // Make bigger alignment if necessary.
8959  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8960  {
8961  bool bufferImageGranularityConflict = false;
8962  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8963  {
8964  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8965  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8966  {
8967  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8968  {
8969  bufferImageGranularityConflict = true;
8970  break;
8971  }
8972  }
8973  else
8974  // Already on previous page.
8975  break;
8976  }
8977  if(bufferImageGranularityConflict)
8978  {
8979  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8980  }
8981  }
8982 
8983  pAllocationRequest->itemsToMakeLostCount = 0;
8984  pAllocationRequest->sumItemSize = 0;
8985  size_t index1st = m_1stNullItemsBeginCount;
8986 
8987  if(canMakeOtherLost)
8988  {
8989  while(index1st < suballocations1st.size() &&
8990  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8991  {
8992  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8993  const VmaSuballocation& suballoc = suballocations1st[index1st];
8994  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8995  {
8996  // No problem.
8997  }
8998  else
8999  {
9000  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9001  if(suballoc.hAllocation->CanBecomeLost() &&
9002  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9003  {
9004  ++pAllocationRequest->itemsToMakeLostCount;
9005  pAllocationRequest->sumItemSize += suballoc.size;
9006  }
9007  else
9008  {
9009  return false;
9010  }
9011  }
9012  ++index1st;
9013  }
9014 
9015  // Check next suballocations for BufferImageGranularity conflicts.
9016  // If conflict exists, we must mark more allocations lost or fail.
9017  if(bufferImageGranularity > 1)
9018  {
9019  while(index1st < suballocations1st.size())
9020  {
9021  const VmaSuballocation& suballoc = suballocations1st[index1st];
9022  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9023  {
9024  if(suballoc.hAllocation != VK_NULL_HANDLE)
9025  {
9026  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9027  if(suballoc.hAllocation->CanBecomeLost() &&
9028  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9029  {
9030  ++pAllocationRequest->itemsToMakeLostCount;
9031  pAllocationRequest->sumItemSize += suballoc.size;
9032  }
9033  else
9034  {
9035  return false;
9036  }
9037  }
9038  }
9039  else
9040  {
9041  // Already on next page.
9042  break;
9043  }
9044  ++index1st;
9045  }
9046  }
9047  }
9048 
9049  // There is enough free space at the end after alignment.
9050  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9051  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9052  {
9053  // Check next suballocations for BufferImageGranularity conflicts.
9054  // If conflict exists, allocation cannot be made here.
9055  if(bufferImageGranularity > 1)
9056  {
9057  for(size_t nextSuballocIndex = index1st;
9058  nextSuballocIndex < suballocations1st.size();
9059  nextSuballocIndex++)
9060  {
9061  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9062  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9063  {
9064  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9065  {
9066  return false;
9067  }
9068  }
9069  else
9070  {
9071  // Already on next page.
9072  break;
9073  }
9074  }
9075  }
9076 
9077  // All tests passed: Success.
9078  pAllocationRequest->offset = resultOffset;
9079  pAllocationRequest->sumFreeSize =
9080  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9081  - resultBaseOffset
9082  - pAllocationRequest->sumItemSize;
9083  // pAllocationRequest->item unused.
9084  return true;
9085  }
9086  }
9087  }
9088 
9089  return false;
9090 }
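// Usage sketch for the upperAddress path handled above (an informal example,
// not part of the header): "pool" stands for a hypothetical handle the caller
// would have created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, and
// "bufCreateInfo"/"allocator" are assumed to exist in the calling code.
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.pool = pool;
//   // This flag is what makes this function receive upperAddress == true:
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
//   VkBuffer buf;
//   VmaAllocation alloc;
//   vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);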
9091 
9092 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9093  uint32_t currentFrameIndex,
9094  uint32_t frameInUseCount,
9095  VmaAllocationRequest* pAllocationRequest)
9096 {
9097  if(pAllocationRequest->itemsToMakeLostCount == 0)
9098  {
9099  return true;
9100  }
9101 
9102  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9103 
9104  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9105  size_t index1st = m_1stNullItemsBeginCount;
9106  size_t madeLostCount = 0;
9107  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9108  {
9109  VMA_ASSERT(index1st < suballocations1st.size());
9110  VmaSuballocation& suballoc = suballocations1st[index1st];
9111  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9112  {
9113  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9114  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
9115  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9116  {
9117  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9118  suballoc.hAllocation = VK_NULL_HANDLE;
9119  m_SumFreeSize += suballoc.size;
9120  ++m_1stNullItemsMiddleCount;
9121  ++madeLostCount;
9122  }
9123  else
9124  {
9125  return false;
9126  }
9127  }
9128  ++index1st;
9129  }
9130 
9131  CleanupAfterFree();
9132  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
9133 
9134  return true;
9135 }
9136 
9137 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9138 {
9139  uint32_t lostAllocationCount = 0;
9140 
9141  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9142  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9143  {
9144  VmaSuballocation& suballoc = suballocations1st[i];
9145  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9146  suballoc.hAllocation->CanBecomeLost() &&
9147  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9148  {
9149  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9150  suballoc.hAllocation = VK_NULL_HANDLE;
9151  ++m_1stNullItemsMiddleCount;
9152  m_SumFreeSize += suballoc.size;
9153  ++lostAllocationCount;
9154  }
9155  }
9156 
9157  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9158  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9159  {
9160  VmaSuballocation& suballoc = suballocations2nd[i];
9161  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9162  suballoc.hAllocation->CanBecomeLost() &&
9163  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9164  {
9165  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9166  suballoc.hAllocation = VK_NULL_HANDLE;
9167  ++m_2ndNullItemsCount;
9168  ++lostAllocationCount;
9169  }
9170  }
9171 
9172  if(lostAllocationCount)
9173  {
9174  CleanupAfterFree();
9175  }
9176 
9177  return lostAllocationCount;
9178 }
9179 
9180 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9181 {
9182  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9183  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9184  {
9185  const VmaSuballocation& suballoc = suballocations1st[i];
9186  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9187  {
9188  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9189  {
9190  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9191  return VK_ERROR_VALIDATION_FAILED_EXT;
9192  }
9193  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9194  {
9195  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9196  return VK_ERROR_VALIDATION_FAILED_EXT;
9197  }
9198  }
9199  }
9200 
9201  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9202  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9203  {
9204  const VmaSuballocation& suballoc = suballocations2nd[i];
9205  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9206  {
9207  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9208  {
9209  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9210  return VK_ERROR_VALIDATION_FAILED_EXT;
9211  }
9212  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9213  {
9214  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9215  return VK_ERROR_VALIDATION_FAILED_EXT;
9216  }
9217  }
9218  }
9219 
9220  return VK_SUCCESS;
9221 }
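// Note: CheckCorruption() above is only meaningful when margins and corruption
// detection are enabled at compile time, e.g. (configuration sketch):
//
//   #define VMA_DEBUG_MARGIN 16
//   #define VMA_DEBUG_DETECT_CORRUPTION 1
//
// defined before including this header; the magic values validated above are
// then written into the margin before and after every allocation.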
9222 
9223 void VmaBlockMetadata_Linear::Alloc(
9224  const VmaAllocationRequest& request,
9225  VmaSuballocationType type,
9226  VkDeviceSize allocSize,
9227  bool upperAddress,
9228  VmaAllocation hAllocation)
9229 {
9230  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9231 
9232  if(upperAddress)
9233  {
9234  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9235  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9236  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9237  suballocations2nd.push_back(newSuballoc);
9238  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9239  }
9240  else
9241  {
9242  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9243 
9244  // First allocation.
9245  if(suballocations1st.empty())
9246  {
9247  suballocations1st.push_back(newSuballoc);
9248  }
9249  else
9250  {
9251  // New allocation at the end of 1st vector.
9252  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9253  {
9254  // Check if it fits before the end of the block.
9255  VMA_ASSERT(request.offset + allocSize <= GetSize());
9256  suballocations1st.push_back(newSuballoc);
9257  }
9258  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
9259  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
9260  {
9261  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9262 
9263  switch(m_2ndVectorMode)
9264  {
9265  case SECOND_VECTOR_EMPTY:
9266  // First allocation from the second part of the ring buffer.
9267  VMA_ASSERT(suballocations2nd.empty());
9268  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
9269  break;
9270  case SECOND_VECTOR_RING_BUFFER:
9271  // 2-part ring buffer is already started.
9272  VMA_ASSERT(!suballocations2nd.empty());
9273  break;
9274  case SECOND_VECTOR_DOUBLE_STACK:
9275  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
9276  break;
9277  default:
9278  VMA_ASSERT(0);
9279  }
9280 
9281  suballocations2nd.push_back(newSuballoc);
9282  }
9283  else
9284  {
9285  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
9286  }
9287  }
9288  }
9289 
9290  m_SumFreeSize -= newSuballoc.size;
9291 }
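// Summary of the placement cases above: an upperAddress request always grows
// the 2nd vector as a stack (double-stack mode); otherwise the request either
// extends the 1st vector at its end, or, when it fits below the first live
// item of the 1st vector, starts or extends the 2nd vector as the
// wrapped-around part of a ring buffer.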
9292 
9293 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9294 {
9295  FreeAtOffset(allocation->GetOffset());
9296 }
9297 
9298 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
9299 {
9300  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9301  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9302 
9303  if(!suballocations1st.empty())
9304  {
9305  // If this is the first allocation: mark it as the next null item at the beginning.
9306  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9307  if(firstSuballoc.offset == offset)
9308  {
9309  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9310  firstSuballoc.hAllocation = VK_NULL_HANDLE;
9311  m_SumFreeSize += firstSuballoc.size;
9312  ++m_1stNullItemsBeginCount;
9313  CleanupAfterFree();
9314  return;
9315  }
9316  }
9317 
9318  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
9319  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
9320  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9321  {
9322  VmaSuballocation& lastSuballoc = suballocations2nd.back();
9323  if(lastSuballoc.offset == offset)
9324  {
9325  m_SumFreeSize += lastSuballoc.size;
9326  suballocations2nd.pop_back();
9327  CleanupAfterFree();
9328  return;
9329  }
9330  }
9331  // Last allocation in 1st vector.
9332  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
9333  {
9334  VmaSuballocation& lastSuballoc = suballocations1st.back();
9335  if(lastSuballoc.offset == offset)
9336  {
9337  m_SumFreeSize += lastSuballoc.size;
9338  suballocations1st.pop_back();
9339  CleanupAfterFree();
9340  return;
9341  }
9342  }
9343 
9344  // Item from the middle of 1st vector.
9345  {
9346  VmaSuballocation refSuballoc;
9347  refSuballoc.offset = offset;
9348  // Rest of members stays uninitialized intentionally for better performance.
9349  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
9350  suballocations1st.begin() + m_1stNullItemsBeginCount,
9351  suballocations1st.end(),
9352  refSuballoc);
9353  if(it != suballocations1st.end())
9354  {
9355  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9356  it->hAllocation = VK_NULL_HANDLE;
9357  ++m_1stNullItemsMiddleCount;
9358  m_SumFreeSize += it->size;
9359  CleanupAfterFree();
9360  return;
9361  }
9362  }
9363 
9364  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
9365  {
9366  // Item from the middle of 2nd vector.
9367  VmaSuballocation refSuballoc;
9368  refSuballoc.offset = offset;
9369  // Rest of members stays uninitialized intentionally for better performance.
9370  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
9371  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
9372  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
9373  if(it != suballocations2nd.end())
9374  {
9375  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9376  it->hAllocation = VK_NULL_HANDLE;
9377  ++m_2ndNullItemsCount;
9378  m_SumFreeSize += it->size;
9379  CleanupAfterFree();
9380  return;
9381  }
9382  }
9383 
9384  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
9385 }
9386 
9387 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9388 {
9389  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9390  const size_t suballocCount = AccessSuballocations1st().size();
9391  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9392 }
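// Worked example for the heuristic above (illustrative numbers): with
// suballocCount == 48 and nullItemCount == 30, we get 30 * 2 == 60 and
// (48 - 30) * 3 == 54, so 60 >= 54 and 48 > 32 both hold and compaction runs.
// In other words, 1st is compacted once null items outnumber live ones 3:2.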
9393 
9394 void VmaBlockMetadata_Linear::CleanupAfterFree()
9395 {
9396  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9397  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9398 
9399  if(IsEmpty())
9400  {
9401  suballocations1st.clear();
9402  suballocations2nd.clear();
9403  m_1stNullItemsBeginCount = 0;
9404  m_1stNullItemsMiddleCount = 0;
9405  m_2ndNullItemsCount = 0;
9406  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9407  }
9408  else
9409  {
9410  const size_t suballoc1stCount = suballocations1st.size();
9411  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9412  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
9413 
9414  // Find more null items at the beginning of 1st vector.
9415  while(m_1stNullItemsBeginCount < suballoc1stCount &&
9416  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9417  {
9418  ++m_1stNullItemsBeginCount;
9419  --m_1stNullItemsMiddleCount;
9420  }
9421 
9422  // Find more null items at the end of 1st vector.
9423  while(m_1stNullItemsMiddleCount > 0 &&
9424  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
9425  {
9426  --m_1stNullItemsMiddleCount;
9427  suballocations1st.pop_back();
9428  }
9429 
9430  // Find more null items at the end of 2nd vector.
9431  while(m_2ndNullItemsCount > 0 &&
9432  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
9433  {
9434  --m_2ndNullItemsCount;
9435  suballocations2nd.pop_back();
9436  }
9437 
9438  if(ShouldCompact1st())
9439  {
9440  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
9441  size_t srcIndex = m_1stNullItemsBeginCount;
9442  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
9443  {
9444  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
9445  {
9446  ++srcIndex;
9447  }
9448  if(dstIndex != srcIndex)
9449  {
9450  suballocations1st[dstIndex] = suballocations1st[srcIndex];
9451  }
9452  ++srcIndex;
9453  }
9454  suballocations1st.resize(nonNullItemCount);
9455  m_1stNullItemsBeginCount = 0;
9456  m_1stNullItemsMiddleCount = 0;
9457  }
9458 
9459  // 2nd vector became empty.
9460  if(suballocations2nd.empty())
9461  {
9462  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9463  }
9464 
9465  // 1st vector became empty.
9466  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
9467  {
9468  suballocations1st.clear();
9469  m_1stNullItemsBeginCount = 0;
9470 
9471  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9472  {
9473  // Swap 1st with 2nd. Now 2nd is empty.
9474  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9475  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
9476  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
9477  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9478  {
9479  ++m_1stNullItemsBeginCount;
9480  --m_1stNullItemsMiddleCount;
9481  }
9482  m_2ndNullItemsCount = 0;
9483  m_1stVectorIndex ^= 1;
9484  }
9485  }
9486  }
9487 
9488  VMA_HEAVY_ASSERT(Validate());
9489 }
9490 
9491 
9492 ////////////////////////////////////////////////////////////////////////////////
9493 // class VmaBlockMetadata_Buddy
9494 
9495 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
9496  VmaBlockMetadata(hAllocator),
9497  m_Root(VMA_NULL),
9498  m_AllocationCount(0),
9499  m_FreeCount(1),
9500  m_SumFreeSize(0)
9501 {
9502  memset(m_FreeList, 0, sizeof(m_FreeList));
9503 }
9504 
9505 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9506 {
9507  DeleteNode(m_Root);
9508 }
9509 
9510 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9511 {
9512  VmaBlockMetadata::Init(size);
9513 
9514  m_UsableSize = VmaPrevPow2(size);
9515  m_SumFreeSize = m_UsableSize;
9516 
9517  // Calculate m_LevelCount.
9518  m_LevelCount = 1;
9519  while(m_LevelCount < MAX_LEVELS &&
9520  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
9521  {
9522  ++m_LevelCount;
9523  }
9524 
9525  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
9526  rootNode->offset = 0;
9527  rootNode->type = Node::TYPE_FREE;
9528  rootNode->parent = VMA_NULL;
9529  rootNode->buddy = VMA_NULL;
9530 
9531  m_Root = rootNode;
9532  AddToFreeListFront(0, rootNode);
9533 }
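// Worked example (illustrative; assumes MIN_NODE_SIZE is a small power of 2
// such as 32): for size == 1000, m_UsableSize == VmaPrevPow2(1000) == 512,
// so level 0 nodes are 512 bytes, level 1 nodes 256 bytes, and so on; the loop
// above stops m_LevelCount at the last level whose node size is still
// >= MIN_NODE_SIZE. The remaining 1000 - 512 = 488 bytes are the unusable tail
// reported by GetUnusableSize().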
9534 
9535 bool VmaBlockMetadata_Buddy::Validate() const
9536 {
9537  // Validate tree.
9538  ValidationContext ctx;
9539  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9540  {
9541  VMA_VALIDATE(false && "ValidateNode failed.");
9542  }
9543  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9544  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9545 
9546  // Validate free node lists.
9547  for(uint32_t level = 0; level < m_LevelCount; ++level)
9548  {
9549  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9550  m_FreeList[level].front->free.prev == VMA_NULL);
9551 
9552  for(Node* node = m_FreeList[level].front;
9553  node != VMA_NULL;
9554  node = node->free.next)
9555  {
9556  VMA_VALIDATE(node->type == Node::TYPE_FREE);
9557 
9558  if(node->free.next == VMA_NULL)
9559  {
9560  VMA_VALIDATE(m_FreeList[level].back == node);
9561  }
9562  else
9563  {
9564  VMA_VALIDATE(node->free.next->free.prev == node);
9565  }
9566  }
9567  }
9568 
9569  // Validate that free lists at higher levels are empty.
9570  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9571  {
9572  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9573  }
9574 
9575  return true;
9576 }
9577 
9578 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9579 {
9580  for(uint32_t level = 0; level < m_LevelCount; ++level)
9581  {
9582  if(m_FreeList[level].front != VMA_NULL)
9583  {
9584  return LevelToNodeSize(level);
9585  }
9586  }
9587  return 0;
9588 }
9589 
9590 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9591 {
9592  const VkDeviceSize unusableSize = GetUnusableSize();
9593 
9594  outInfo.blockCount = 1;
9595 
9596  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9597  outInfo.usedBytes = outInfo.unusedBytes = 0;
9598 
9599  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9600  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9601  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9602 
9603  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
9604 
9605  if(unusableSize > 0)
9606  {
9607  ++outInfo.unusedRangeCount;
9608  outInfo.unusedBytes += unusableSize;
9609  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
9610  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
9611  }
9612 }
9613 
9614 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9615 {
9616  const VkDeviceSize unusableSize = GetUnusableSize();
9617 
9618  inoutStats.size += GetSize();
9619  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9620  inoutStats.allocationCount += m_AllocationCount;
9621  inoutStats.unusedRangeCount += m_FreeCount;
9622  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9623 
9624  if(unusableSize > 0)
9625  {
9626  ++inoutStats.unusedRangeCount;
9627  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9628  }
9629 }
9630 
9631 #if VMA_STATS_STRING_ENABLED
9632 
9633 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9634 {
9635  // TODO optimize
9636  VmaStatInfo stat;
9637  CalcAllocationStatInfo(stat);
9638 
9639  PrintDetailedMap_Begin(
9640  json,
9641  stat.unusedBytes,
9642  stat.allocationCount,
9643  stat.unusedRangeCount);
9644 
9645  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9646 
9647  const VkDeviceSize unusableSize = GetUnusableSize();
9648  if(unusableSize > 0)
9649  {
9650  PrintDetailedMap_UnusedRange(json,
9651  m_UsableSize, // offset
9652  unusableSize); // size
9653  }
9654 
9655  PrintDetailedMap_End(json);
9656 }
9657 
9658 #endif // #if VMA_STATS_STRING_ENABLED
9659 
9660 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9661  uint32_t currentFrameIndex,
9662  uint32_t frameInUseCount,
9663  VkDeviceSize bufferImageGranularity,
9664  VkDeviceSize allocSize,
9665  VkDeviceSize allocAlignment,
9666  bool upperAddress,
9667  VmaSuballocationType allocType,
9668  bool canMakeOtherLost,
9669  uint32_t strategy,
9670  VmaAllocationRequest* pAllocationRequest)
9671 {
9672  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9673 
9674  // Simple way to respect bufferImageGranularity. May be optimized some day.
9675  // Whenever it might be an OPTIMAL image...
9676  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9677  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9678  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9679  {
9680  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9681  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9682  }
9683 
9684  if(allocSize > m_UsableSize)
9685  {
9686  return false;
9687  }
9688 
9689  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9690  for(uint32_t level = targetLevel + 1; level--; )
9691  {
9692  for(Node* freeNode = m_FreeList[level].front;
9693  freeNode != VMA_NULL;
9694  freeNode = freeNode->free.next)
9695  {
9696  if(freeNode->offset % allocAlignment == 0)
9697  {
9698  pAllocationRequest->offset = freeNode->offset;
9699  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9700  pAllocationRequest->sumItemSize = 0;
9701  pAllocationRequest->itemsToMakeLostCount = 0;
9702  pAllocationRequest->customData = (void*)(uintptr_t)level;
9703  return true;
9704  }
9705  }
9706  }
9707 
9708  return false;
9709 }
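// Search order note: the loop above starts at targetLevel (the smallest nodes
// that can still hold allocSize) and walks toward level 0 (the root), so the
// first suitably aligned free node found is also the smallest adequate one,
// which keeps the internal fragmentation of a buddy allocation minimal.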
9710 
9711 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9712  uint32_t currentFrameIndex,
9713  uint32_t frameInUseCount,
9714  VmaAllocationRequest* pAllocationRequest)
9715 {
9716  /*
9717  Lost allocations are not supported in buddy allocator at the moment.
9718  Support might be added in the future.
9719  */
9720  return pAllocationRequest->itemsToMakeLostCount == 0;
9721 }
9722 
9723 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9724 {
9725  /*
9726  Lost allocations are not supported in buddy allocator at the moment.
9727  Support might be added in the future.
9728  */
9729  return 0;
9730 }
9731 
9732 void VmaBlockMetadata_Buddy::Alloc(
9733  const VmaAllocationRequest& request,
9734  VmaSuballocationType type,
9735  VkDeviceSize allocSize,
9736  bool upperAddress,
9737  VmaAllocation hAllocation)
9738 {
9739  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9740  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9741 
9742  Node* currNode = m_FreeList[currLevel].front;
9743  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9744  while(currNode->offset != request.offset)
9745  {
9746  currNode = currNode->free.next;
9747  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9748  }
9749 
9750  // Go down, splitting free nodes.
9751  while(currLevel < targetLevel)
9752  {
9753  // currNode is already first free node at currLevel.
9754  // Remove it from list of free nodes at this currLevel.
9755  RemoveFromFreeList(currLevel, currNode);
9756 
9757  const uint32_t childrenLevel = currLevel + 1;
9758 
9759  // Create two free sub-nodes.
9760  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
9761  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
9762 
9763  leftChild->offset = currNode->offset;
9764  leftChild->type = Node::TYPE_FREE;
9765  leftChild->parent = currNode;
9766  leftChild->buddy = rightChild;
9767 
9768  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9769  rightChild->type = Node::TYPE_FREE;
9770  rightChild->parent = currNode;
9771  rightChild->buddy = leftChild;
9772 
9773  // Convert current currNode to split type.
9774  currNode->type = Node::TYPE_SPLIT;
9775  currNode->split.leftChild = leftChild;
9776 
9777  // Add child nodes to free list. Order is important!
9778  AddToFreeListFront(childrenLevel, rightChild);
9779  AddToFreeListFront(childrenLevel, leftChild);
9780 
9781  ++m_FreeCount;
9782  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
9783  ++currLevel;
9784  currNode = m_FreeList[currLevel].front;
9785 
9786  /*
9787  We can be sure that currNode, as left child of node previously split,
9788  also fulfills the alignment requirement.
9789  */
9790  }
9791 
9792  // Remove from free list.
9793  VMA_ASSERT(currLevel == targetLevel &&
9794  currNode != VMA_NULL &&
9795  currNode->type == Node::TYPE_FREE);
9796  RemoveFromFreeList(currLevel, currNode);
9797 
9798  // Convert to allocation node.
9799  currNode->type = Node::TYPE_ALLOCATION;
9800  currNode->allocation.alloc = hAllocation;
9801 
9802  ++m_AllocationCount;
9803  --m_FreeCount;
9804  m_SumFreeSize -= allocSize;
9805 }
9806 
9807 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9808 {
9809  if(node->type == Node::TYPE_SPLIT)
9810  {
9811  DeleteNode(node->split.leftChild->buddy);
9812  DeleteNode(node->split.leftChild);
9813  }
9814 
9815  vma_delete(GetAllocationCallbacks(), node);
9816 }
9817 
9818 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9819 {
9820  VMA_VALIDATE(level < m_LevelCount);
9821  VMA_VALIDATE(curr->parent == parent);
9822  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9823  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9824  switch(curr->type)
9825  {
9826  case Node::TYPE_FREE:
9827  // curr->free.prev, next are validated separately.
9828  ctx.calculatedSumFreeSize += levelNodeSize;
9829  ++ctx.calculatedFreeCount;
9830  break;
9831  case Node::TYPE_ALLOCATION:
9832  ++ctx.calculatedAllocationCount;
9833  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9834  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
9835  break;
9836  case Node::TYPE_SPLIT:
9837  {
9838  const uint32_t childrenLevel = level + 1;
9839  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9840  const Node* const leftChild = curr->split.leftChild;
9841  VMA_VALIDATE(leftChild != VMA_NULL);
9842  VMA_VALIDATE(leftChild->offset == curr->offset);
9843  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9844  {
9845  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9846  }
9847  const Node* const rightChild = leftChild->buddy;
9848  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9849  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9850  {
9851  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9852  }
9853  }
9854  break;
9855  default:
9856  return false;
9857  }
9858 
9859  return true;
9860 }
9861 
9862 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9863 {
9864  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
9865  uint32_t level = 0;
9866  VkDeviceSize currLevelNodeSize = m_UsableSize;
9867  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9868  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9869  {
9870  ++level;
9871  currLevelNodeSize = nextLevelNodeSize;
9872  nextLevelNodeSize = currLevelNodeSize >> 1;
9873  }
9874  return level;
9875 }
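// Worked example: with m_UsableSize == 1024 and enough levels available, an
// allocSize of 100 walks the node sizes 1024 -> 512 -> 256 -> 128 and stops
// because 100 > 64, returning level 3; nodes at that level are 128 bytes, the
// smallest power-of-2 node size that still fits the request.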
9876 
9877 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9878 {
9879  // Find node and level.
9880  Node* node = m_Root;
9881  VkDeviceSize nodeOffset = 0;
9882  uint32_t level = 0;
9883  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9884  while(node->type == Node::TYPE_SPLIT)
9885  {
9886  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9887  if(offset < nodeOffset + nextLevelSize)
9888  {
9889  node = node->split.leftChild;
9890  }
9891  else
9892  {
9893  node = node->split.leftChild->buddy;
9894  nodeOffset += nextLevelSize;
9895  }
9896  ++level;
9897  levelNodeSize = nextLevelSize;
9898  }
9899 
9900  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9901  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9902 
9903  ++m_FreeCount;
9904  --m_AllocationCount;
9905  m_SumFreeSize += alloc->GetSize();
9906 
9907  node->type = Node::TYPE_FREE;
9908 
9909  // Join free nodes if possible.
9910  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9911  {
9912  RemoveFromFreeList(level, node->buddy);
9913  Node* const parent = node->parent;
9914 
9915  vma_delete(GetAllocationCallbacks(), node->buddy);
9916  vma_delete(GetAllocationCallbacks(), node);
9917  parent->type = Node::TYPE_FREE;
9918 
9919  node = parent;
9920  --level;
9921  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9922  --m_FreeCount;
9923  }
9924 
9925  AddToFreeListFront(level, node);
9926 }
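// Merge example: when the freed node's buddy at the same level is also free,
// both children are deleted and the parent becomes a single free node one
// level up; the loop above repeats this toward the root, so e.g. two free
// 128-byte buddies collapse into one free 256-byte node, which may merge
// again with its own buddy, and so on.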
9927 
9928 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9929 {
9930  switch(node->type)
9931  {
9932  case Node::TYPE_FREE:
9933  ++outInfo.unusedRangeCount;
9934  outInfo.unusedBytes += levelNodeSize;
9935  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9936  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
9937  break;
9938  case Node::TYPE_ALLOCATION:
9939  {
9940  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9941  ++outInfo.allocationCount;
9942  outInfo.usedBytes += allocSize;
9943  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
9944  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
9945 
9946  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9947  if(unusedRangeSize > 0)
9948  {
9949  ++outInfo.unusedRangeCount;
9950  outInfo.unusedBytes += unusedRangeSize;
9951  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9952  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9953  }
9954  }
9955  break;
9956  case Node::TYPE_SPLIT:
9957  {
9958  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9959  const Node* const leftChild = node->split.leftChild;
9960  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9961  const Node* const rightChild = leftChild->buddy;
9962  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9963  }
9964  break;
9965  default:
9966  VMA_ASSERT(0);
9967  }
9968 }
9969 
9970 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9971 {
9972  VMA_ASSERT(node->type == Node::TYPE_FREE);
9973 
9974  // List is empty.
9975  Node* const frontNode = m_FreeList[level].front;
9976  if(frontNode == VMA_NULL)
9977  {
9978  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9979  node->free.prev = node->free.next = VMA_NULL;
9980  m_FreeList[level].front = m_FreeList[level].back = node;
9981  }
9982  else
9983  {
9984  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9985  node->free.prev = VMA_NULL;
9986  node->free.next = frontNode;
9987  frontNode->free.prev = node;
9988  m_FreeList[level].front = node;
9989  }
9990 }
9991 
9992 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9993 {
9994  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9995 
9996  // It is at the front.
9997  if(node->free.prev == VMA_NULL)
9998  {
9999  VMA_ASSERT(m_FreeList[level].front == node);
10000  m_FreeList[level].front = node->free.next;
10001  }
10002  else
10003  {
10004  Node* const prevFreeNode = node->free.prev;
10005  VMA_ASSERT(prevFreeNode->free.next == node);
10006  prevFreeNode->free.next = node->free.next;
10007  }
10008 
10009  // It is at the back.
10010  if(node->free.next == VMA_NULL)
10011  {
10012  VMA_ASSERT(m_FreeList[level].back == node);
10013  m_FreeList[level].back = node->free.prev;
10014  }
10015  else
10016  {
10017  Node* const nextFreeNode = node->free.next;
10018  VMA_ASSERT(nextFreeNode->free.prev == node);
10019  nextFreeNode->free.prev = node->free.prev;
10020  }
10021 }
10022 
10023 #if VMA_STATS_STRING_ENABLED
10024 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10025 {
10026  switch(node->type)
10027  {
10028  case Node::TYPE_FREE:
10029  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10030  break;
10031  case Node::TYPE_ALLOCATION:
10032  {
10033  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10034  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10035  if(allocSize < levelNodeSize)
10036  {
10037  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10038  }
10039  }
10040  break;
10041  case Node::TYPE_SPLIT:
10042  {
10043  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10044  const Node* const leftChild = node->split.leftChild;
10045  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10046  const Node* const rightChild = leftChild->buddy;
10047  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10048  }
10049  break;
10050  default:
10051  VMA_ASSERT(0);
10052  }
10053 }
10054 #endif // #if VMA_STATS_STRING_ENABLED
10055 
10056 
10057 ////////////////////////////////////////////////////////////////////////////////
10058 // class VmaDeviceMemoryBlock
10059 
10060 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10061  m_pMetadata(VMA_NULL),
10062  m_MemoryTypeIndex(UINT32_MAX),
10063  m_Id(0),
10064  m_hMemory(VK_NULL_HANDLE),
10065  m_MapCount(0),
10066  m_pMappedData(VMA_NULL)
10067 {
10068 }
10069 
10070 void VmaDeviceMemoryBlock::Init(
10071  VmaAllocator hAllocator,
10072  uint32_t newMemoryTypeIndex,
10073  VkDeviceMemory newMemory,
10074  VkDeviceSize newSize,
10075  uint32_t id,
10076  uint32_t algorithm)
10077 {
10078  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10079 
10080  m_MemoryTypeIndex = newMemoryTypeIndex;
10081  m_Id = id;
10082  m_hMemory = newMemory;
10083 
10084  switch(algorithm)
10085  {
10086  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10087  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10088  break;
10089  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10090  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10091  break;
10092  default:
10093  VMA_ASSERT(0);
10094  // Fall-through.
10095  case 0:
10096  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10097  }
10098  m_pMetadata->Init(newSize);
10099 }
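/*
Which metadata class is instantiated above is driven by pool creation flags. A usage
sketch (assuming an initialized VmaAllocator `allocator` and a memory type index
`memTypeIndex` chosen elsewhere, e.g. via vmaFindMemoryTypeIndex):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT; // or ..._LINEAR_ALGORITHM_BIT
    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

With no algorithm flag set, the generic metadata is used - the `case 0` above.
*/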
10100 
10101 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10102 {
10103  // This is the most important assert in the entire library.
10104  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10105  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10106 
10107  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10108  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10109  m_hMemory = VK_NULL_HANDLE;
10110 
10111  vma_delete(allocator, m_pMetadata);
10112  m_pMetadata = VMA_NULL;
10113 }
10114 
10115 bool VmaDeviceMemoryBlock::Validate() const
10116 {
10117  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
10118  (m_pMetadata->GetSize() != 0));
10119 
10120  return m_pMetadata->Validate();
10121 }
10122 
10123 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
10124 {
10125  void* pData = nullptr;
10126  VkResult res = Map(hAllocator, 1, &pData);
10127  if(res != VK_SUCCESS)
10128  {
10129  return res;
10130  }
10131 
10132  res = m_pMetadata->CheckCorruption(pData);
10133 
10134  Unmap(hAllocator, 1);
10135 
10136  return res;
10137 }
10138 
10139 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10140 {
10141  if(count == 0)
10142  {
10143  return VK_SUCCESS;
10144  }
10145 
10146  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10147  if(m_MapCount != 0)
10148  {
10149  m_MapCount += count;
10150  VMA_ASSERT(m_pMappedData != VMA_NULL);
10151  if(ppData != VMA_NULL)
10152  {
10153  *ppData = m_pMappedData;
10154  }
10155  return VK_SUCCESS;
10156  }
10157  else
10158  {
10159  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10160  hAllocator->m_hDevice,
10161  m_hMemory,
10162  0, // offset
10163  VK_WHOLE_SIZE,
10164  0, // flags
10165  &m_pMappedData);
10166  if(result == VK_SUCCESS)
10167  {
10168  if(ppData != VMA_NULL)
10169  {
10170  *ppData = m_pMappedData;
10171  }
10172  m_MapCount = count;
10173  }
10174  return result;
10175  }
10176 }
10177 
10178 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10179 {
10180  if(count == 0)
10181  {
10182  return;
10183  }
10184 
10185  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10186  if(m_MapCount >= count)
10187  {
10188  m_MapCount -= count;
10189  if(m_MapCount == 0)
10190  {
10191  m_pMappedData = VMA_NULL;
10192  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10193  }
10194  }
10195  else
10196  {
10197  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10198  }
10199 }
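/*
Map/Unmap above are reference-counted per VkDeviceMemory block, which is what makes
nested mapping legal at the library level. A behavioral sketch (assuming a
host-visible VmaAllocation `alloc`):

    void* pData1 = VMA_NULL;
    void* pData2 = VMA_NULL;
    vmaMapMemory(allocator, alloc, &pData1); // m_MapCount 0 -> 1: calls vkMapMemory
    vmaMapMemory(allocator, alloc, &pData2); // m_MapCount 1 -> 2: reuses the mapping, pData2 == pData1
    vmaUnmapMemory(allocator, alloc);        // m_MapCount 2 -> 1: memory stays mapped
    vmaUnmapMemory(allocator, alloc);        // m_MapCount 1 -> 0: calls vkUnmapMemory
*/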
10200 
10201 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10202 {
10203  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10204  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10205 
10206  void* pData;
10207  VkResult res = Map(hAllocator, 1, &pData);
10208  if(res != VK_SUCCESS)
10209  {
10210  return res;
10211  }
10212 
10213  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10214  VmaWriteMagicValue(pData, allocOffset + allocSize);
10215 
10216  Unmap(hAllocator, 1);
10217 
10218  return VK_SUCCESS;
10219 }
10220 
10221 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10222 {
10223  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10224  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10225 
10226  void* pData;
10227  VkResult res = Map(hAllocator, 1, &pData);
10228  if(res != VK_SUCCESS)
10229  {
10230  return res;
10231  }
10232 
10233  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10234  {
10235  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10236  }
10237  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10238  {
10239  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10240  }
10241 
10242  Unmap(hAllocator, 1);
10243 
10244  return VK_SUCCESS;
10245 }
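/*
The magic values written and validated above are active only when the library is
compiled with a non-zero debug margin and corruption detection. A configuration
sketch (these macros are the library's own; the values are just an example, and they
must be defined before the implementation is compiled):

    #define VMA_DEBUG_MARGIN 16 // must be a multiple of 4, see the asserts above
    #define VMA_DEBUG_DETECT_CORRUPTION 1
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"

    // Later, validate margins of all host-visible + host-coherent memory types:
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
*/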
10246 
10247 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10248  const VmaAllocator hAllocator,
10249  const VmaAllocation hAllocation,
10250  VkBuffer hBuffer)
10251 {
10252  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10253  hAllocation->GetBlock() == this);
10254  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10255  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10256  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10257  hAllocator->m_hDevice,
10258  hBuffer,
10259  m_hMemory,
10260  hAllocation->GetOffset());
10261 }
10262 
10263 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10264  const VmaAllocator hAllocator,
10265  const VmaAllocation hAllocation,
10266  VkImage hImage)
10267 {
10268  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10269  hAllocation->GetBlock() == this);
10270  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10271  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10272  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10273  hAllocator->m_hDevice,
10274  hImage,
10275  m_hMemory,
10276  hAllocation->GetOffset());
10277 }
10278 
10279 static void InitStatInfo(VmaStatInfo& outInfo)
10280 {
10281  memset(&outInfo, 0, sizeof(outInfo));
10282  outInfo.allocationSizeMin = UINT64_MAX;
10283  outInfo.unusedRangeSizeMin = UINT64_MAX;
10284 }
10285 
10286 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10287 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10288 {
10289  inoutInfo.blockCount += srcInfo.blockCount;
10290  inoutInfo.allocationCount += srcInfo.allocationCount;
10291  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10292  inoutInfo.usedBytes += srcInfo.usedBytes;
10293  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10294  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10295  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10296  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10297  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10298 }
10299 
10300 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10301 {
10302  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10303  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10304  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10305  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10306 }
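/*
These helpers aggregate per-block VmaStatInfo into the totals returned by the public
API. Usage sketch (assuming an initialized VmaAllocator `allocator`):

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used: %llu bytes in %u allocations.\n",
        (unsigned long long)stats.total.usedBytes,
        stats.total.allocationCount);
*/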
10307 
10308 VmaPool_T::VmaPool_T(
10309  VmaAllocator hAllocator,
10310  const VmaPoolCreateInfo& createInfo,
10311  VkDeviceSize preferredBlockSize) :
10312  m_BlockVector(
10313  hAllocator,
10314  createInfo.memoryTypeIndex,
10315  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
10316  createInfo.minBlockCount,
10317  createInfo.maxBlockCount,
10318  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
10319  createInfo.frameInUseCount,
10320  true, // isCustomPool
10321  createInfo.blockSize != 0, // explicitBlockSize
10322  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
10323  m_Id(0)
10324 {
10325 }
10326 
10327 VmaPool_T::~VmaPool_T()
10328 {
10329 }
10330 
10331 #if VMA_STATS_STRING_ENABLED
10332 
10333 #endif // #if VMA_STATS_STRING_ENABLED
10334 
10335 VmaBlockVector::VmaBlockVector(
10336  VmaAllocator hAllocator,
10337  uint32_t memoryTypeIndex,
10338  VkDeviceSize preferredBlockSize,
10339  size_t minBlockCount,
10340  size_t maxBlockCount,
10341  VkDeviceSize bufferImageGranularity,
10342  uint32_t frameInUseCount,
10343  bool isCustomPool,
10344  bool explicitBlockSize,
10345  uint32_t algorithm) :
10346  m_hAllocator(hAllocator),
10347  m_MemoryTypeIndex(memoryTypeIndex),
10348  m_PreferredBlockSize(preferredBlockSize),
10349  m_MinBlockCount(minBlockCount),
10350  m_MaxBlockCount(maxBlockCount),
10351  m_BufferImageGranularity(bufferImageGranularity),
10352  m_FrameInUseCount(frameInUseCount),
10353  m_IsCustomPool(isCustomPool),
10354  m_ExplicitBlockSize(explicitBlockSize),
10355  m_Algorithm(algorithm),
10356  m_HasEmptyBlock(false),
10357  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
10358  m_pDefragmentator(VMA_NULL),
10359  m_NextBlockId(0)
10360 {
10361 }
10362 
10363 VmaBlockVector::~VmaBlockVector()
10364 {
10365  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10366 
10367  for(size_t i = m_Blocks.size(); i--; )
10368  {
10369  m_Blocks[i]->Destroy(m_hAllocator);
10370  vma_delete(m_hAllocator, m_Blocks[i]);
10371  }
10372 }
10373 
10374 VkResult VmaBlockVector::CreateMinBlocks()
10375 {
10376  for(size_t i = 0; i < m_MinBlockCount; ++i)
10377  {
10378  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10379  if(res != VK_SUCCESS)
10380  {
10381  return res;
10382  }
10383  }
10384  return VK_SUCCESS;
10385 }
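/*
CreateMinBlocks backs VmaPoolCreateInfo::minBlockCount: that many blocks are created
up front and are never freed, even when they become empty. Sketch with hypothetical
limits:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // chosen elsewhere
    poolCreateInfo.minBlockCount = 2; // pre-create two blocks of preferred size
    poolCreateInfo.maxBlockCount = 8; // never grow beyond eight blocks
*/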
10386 
10387 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10388 {
10389  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10390 
10391  const size_t blockCount = m_Blocks.size();
10392 
10393  pStats->size = 0;
10394  pStats->unusedSize = 0;
10395  pStats->allocationCount = 0;
10396  pStats->unusedRangeCount = 0;
10397  pStats->unusedRangeSizeMax = 0;
10398  pStats->blockCount = blockCount;
10399 
10400  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10401  {
10402  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10403  VMA_ASSERT(pBlock);
10404  VMA_HEAVY_ASSERT(pBlock->Validate());
10405  pBlock->m_pMetadata->AddPoolStats(*pStats);
10406  }
10407 }
10408 
10409 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10410 {
10411  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10412  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10413  (VMA_DEBUG_MARGIN > 0) &&
10414  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10415 }
10416 
10417 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10418 
10419 VkResult VmaBlockVector::Allocate(
10420  VmaPool hCurrentPool,
10421  uint32_t currentFrameIndex,
10422  VkDeviceSize size,
10423  VkDeviceSize alignment,
10424  const VmaAllocationCreateInfo& createInfo,
10425  VmaSuballocationType suballocType,
10426  VmaAllocation* pAllocation)
10427 {
10428  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10429  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10430  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10431  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10432  const bool canCreateNewBlock =
10433  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10434  (m_Blocks.size() < m_MaxBlockCount);
10435  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10436 
10437  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
10438  // Which in turn is available only when maxBlockCount = 1.
10439  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10440  {
10441  canMakeOtherLost = false;
10442  }
10443 
10444  // Upper address can only be used with linear allocator and within single memory block.
10445  if(isUpperAddress &&
10446  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10447  {
10448  return VK_ERROR_FEATURE_NOT_PRESENT;
10449  }
10450 
10451  // Validate strategy.
10452  switch(strategy)
10453  {
10454  case 0:
10455  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
10456  break;
10457  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
10458  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
10459  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
10460  break;
10461  default:
10462  return VK_ERROR_FEATURE_NOT_PRESENT;
10463  }
10464 
10465  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
10466  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10467  {
10468  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10469  }
10470 
10471  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10472 
10473  /*
10474  Under certain conditions, this whole section can be skipped for optimization, so
10475  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10476  e.g. for custom pools with linear algorithm.
10477  */
10478  if(!canMakeOtherLost || canCreateNewBlock)
10479  {
10480  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10481  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10482  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
10483 
10484  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10485  {
10486  // Use only last block.
10487  if(!m_Blocks.empty())
10488  {
10489  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10490  VMA_ASSERT(pCurrBlock);
10491  VkResult res = AllocateFromBlock(
10492  pCurrBlock,
10493  hCurrentPool,
10494  currentFrameIndex,
10495  size,
10496  alignment,
10497  allocFlagsCopy,
10498  createInfo.pUserData,
10499  suballocType,
10500  strategy,
10501  pAllocation);
10502  if(res == VK_SUCCESS)
10503  {
10504  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10505  return VK_SUCCESS;
10506  }
10507  }
10508  }
10509  else
10510  {
10511  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10512  {
10513  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10514  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10515  {
10516  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10517  VMA_ASSERT(pCurrBlock);
10518  VkResult res = AllocateFromBlock(
10519  pCurrBlock,
10520  hCurrentPool,
10521  currentFrameIndex,
10522  size,
10523  alignment,
10524  allocFlagsCopy,
10525  createInfo.pUserData,
10526  suballocType,
10527  strategy,
10528  pAllocation);
10529  if(res == VK_SUCCESS)
10530  {
10531  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10532  return VK_SUCCESS;
10533  }
10534  }
10535  }
10536  else // WORST_FIT, FIRST_FIT
10537  {
10538  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10539  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10540  {
10541  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10542  VMA_ASSERT(pCurrBlock);
10543  VkResult res = AllocateFromBlock(
10544  pCurrBlock,
10545  hCurrentPool,
10546  currentFrameIndex,
10547  size,
10548  alignment,
10549  allocFlagsCopy,
10550  createInfo.pUserData,
10551  suballocType,
10552  strategy,
10553  pAllocation);
10554  if(res == VK_SUCCESS)
10555  {
10556  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10557  return VK_SUCCESS;
10558  }
10559  }
10560  }
10561  }
10562 
10563  // 2. Try to create new block.
10564  if(canCreateNewBlock)
10565  {
10566  // Calculate optimal size for new block.
10567  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10568  uint32_t newBlockSizeShift = 0;
10569  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10570 
10571  if(!m_ExplicitBlockSize)
10572  {
10573  // Allocate 1/8, 1/4, 1/2 as first blocks.
10574  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10575  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10576  {
10577  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10578  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10579  {
10580  newBlockSize = smallerNewBlockSize;
10581  ++newBlockSizeShift;
10582  }
10583  else
10584  {
10585  break;
10586  }
10587  }
10588  }
10589 
10590  size_t newBlockIndex = 0;
10591  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10592  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10593  if(!m_ExplicitBlockSize)
10594  {
10595  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10596  {
10597  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10598  if(smallerNewBlockSize >= size)
10599  {
10600  newBlockSize = smallerNewBlockSize;
10601  ++newBlockSizeShift;
10602  res = CreateBlock(newBlockSize, &newBlockIndex);
10603  }
10604  else
10605  {
10606  break;
10607  }
10608  }
10609  }
10610 
10611  if(res == VK_SUCCESS)
10612  {
10613  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10614  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10615 
10616  res = AllocateFromBlock(
10617  pBlock,
10618  hCurrentPool,
10619  currentFrameIndex,
10620  size,
10621  alignment,
10622  allocFlagsCopy,
10623  createInfo.pUserData,
10624  suballocType,
10625  strategy,
10626  pAllocation);
10627  if(res == VK_SUCCESS)
10628  {
10629  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10630  return VK_SUCCESS;
10631  }
10632  else
10633  {
10634  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10635  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10636  }
10637  }
10638  }
10639  }
10640 
10641  // 3. Try to allocate from existing blocks with making other allocations lost.
10642  if(canMakeOtherLost)
10643  {
10644  uint32_t tryIndex = 0;
10645  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10646  {
10647  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10648  VmaAllocationRequest bestRequest = {};
10649  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10650 
10651  // 1. Search existing allocations.
10652  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10653  {
10654  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10655  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10656  {
10657  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10658  VMA_ASSERT(pCurrBlock);
10659  VmaAllocationRequest currRequest = {};
10660  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10661  currentFrameIndex,
10662  m_FrameInUseCount,
10663  m_BufferImageGranularity,
10664  size,
10665  alignment,
10666  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10667  suballocType,
10668  canMakeOtherLost,
10669  strategy,
10670  &currRequest))
10671  {
10672  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10673  if(pBestRequestBlock == VMA_NULL ||
10674  currRequestCost < bestRequestCost)
10675  {
10676  pBestRequestBlock = pCurrBlock;
10677  bestRequest = currRequest;
10678  bestRequestCost = currRequestCost;
10679 
10680  if(bestRequestCost == 0)
10681  {
10682  break;
10683  }
10684  }
10685  }
10686  }
10687  }
10688  else // WORST_FIT, FIRST_FIT
10689  {
10690  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10691  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10692  {
10693  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10694  VMA_ASSERT(pCurrBlock);
10695  VmaAllocationRequest currRequest = {};
10696  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10697  currentFrameIndex,
10698  m_FrameInUseCount,
10699  m_BufferImageGranularity,
10700  size,
10701  alignment,
10702  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10703  suballocType,
10704  canMakeOtherLost,
10705  strategy,
10706  &currRequest))
10707  {
10708  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10709  if(pBestRequestBlock == VMA_NULL ||
10710  currRequestCost < bestRequestCost ||
10711  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10712  {
10713  pBestRequestBlock = pCurrBlock;
10714  bestRequest = currRequest;
10715  bestRequestCost = currRequestCost;
10716 
10717  if(bestRequestCost == 0 ||
10718  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10719  {
10720  break;
10721  }
10722  }
10723  }
10724  }
10725  }
10726 
10727  if(pBestRequestBlock != VMA_NULL)
10728  {
10729  if(mapped)
10730  {
10731  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10732  if(res != VK_SUCCESS)
10733  {
10734  return res;
10735  }
10736  }
10737 
10738  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10739  currentFrameIndex,
10740  m_FrameInUseCount,
10741  &bestRequest))
10742  {
10743  // We no longer have an empty block.
10744  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10745  {
10746  m_HasEmptyBlock = false;
10747  }
10748  // Allocate from this pBlock.
10749  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10750  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10751  (*pAllocation)->InitBlockAllocation(
10752  hCurrentPool,
10753  pBestRequestBlock,
10754  bestRequest.offset,
10755  alignment,
10756  size,
10757  suballocType,
10758  mapped,
10759  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10760  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10761  VMA_DEBUG_LOG(" Returned from existing block");
10762  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10763  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10764  {
10765  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10766  }
10767  if(IsCorruptionDetectionEnabled())
10768  {
10769  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10770  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10771  }
10772  return VK_SUCCESS;
10773  }
10774  // else: Some allocations must have been touched while we are here. Next try.
10775  }
10776  else
10777  {
10778  // Could not find place in any of the blocks - break outer loop.
10779  break;
10780  }
10781  }
10782  /* Maximum number of tries exceeded - a very unlikely event when many other
10783  threads are simultaneously touching allocations, making it impossible to make
10784  them lost at the same time as we try to allocate. */
10785  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10786  {
10787  return VK_ERROR_TOO_MANY_OBJECTS;
10788  }
10789  }
10790 
10791  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10792 }
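/*
Recap of the three stages above, which the caller steers through allocation flags
(a sketch, not an exhaustive list of valid combinations):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // Skip stage 2 - fail instead of calling vkAllocateMemory for a new block:
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT;
    // Or trade packing quality for search speed in stage 1:
    //allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT;
    // Stage 3 runs only for allocations using VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT.
*/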
10793 
10794 void VmaBlockVector::Free(
10795  VmaAllocation hAllocation)
10796 {
10797  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10798 
10799  // Scope for lock.
10800  {
10801  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10802 
10803  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10804 
10805  if(IsCorruptionDetectionEnabled())
10806  {
10807  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10808  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10809  }
10810 
10811  if(hAllocation->IsPersistentMap())
10812  {
10813  pBlock->Unmap(m_hAllocator, 1);
10814  }
10815 
10816  pBlock->m_pMetadata->Free(hAllocation);
10817  VMA_HEAVY_ASSERT(pBlock->Validate());
10818 
10819  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
10820 
10821  // pBlock became empty after this deallocation.
10822  if(pBlock->m_pMetadata->IsEmpty())
10823  {
10824  // We already have an empty block - we don't want two, so delete this one.
10825  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10826  {
10827  pBlockToDelete = pBlock;
10828  Remove(pBlock);
10829  }
10830  // We now have our first empty block.
10831  else
10832  {
10833  m_HasEmptyBlock = true;
10834  }
10835  }
10836  // pBlock didn't become empty, but we have another empty block - find and free that one.
10837  // (This is optional, a heuristic.)
10838  else if(m_HasEmptyBlock)
10839  {
10840  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10841  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10842  {
10843  pBlockToDelete = pLastBlock;
10844  m_Blocks.pop_back();
10845  m_HasEmptyBlock = false;
10846  }
10847  }
10848 
10849  IncrementallySortBlocks();
10850  }
10851 
10852  // Destruction of a free block. Deferred until this point, outside of mutex
10853  // lock, for performance reasons.
10854  if(pBlockToDelete != VMA_NULL)
10855  {
10856  VMA_DEBUG_LOG(" Deleted empty block");
10857  pBlockToDelete->Destroy(m_hAllocator);
10858  vma_delete(m_hAllocator, pBlockToDelete);
10859  }
10860 }
10861 
10862 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10863 {
10864  VkDeviceSize result = 0;
10865  for(size_t i = m_Blocks.size(); i--; )
10866  {
10867  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10868  if(result >= m_PreferredBlockSize)
10869  {
10870  break;
10871  }
10872  }
10873  return result;
10874 }
10875 
10876 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10877 {
10878  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10879  {
10880  if(m_Blocks[blockIndex] == pBlock)
10881  {
10882  VmaVectorRemove(m_Blocks, blockIndex);
10883  return;
10884  }
10885  }
10886  VMA_ASSERT(0);
10887 }
10888 
10889 void VmaBlockVector::IncrementallySortBlocks()
10890 {
10891  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10892  {
10893  // Bubble sort only until first swap.
10894  for(size_t i = 1; i < m_Blocks.size(); ++i)
10895  {
10896  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10897  {
10898  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10899  return;
10900  }
10901  }
10902  }
10903 }
10904 
10905 VkResult VmaBlockVector::AllocateFromBlock(
10906  VmaDeviceMemoryBlock* pBlock,
10907  VmaPool hCurrentPool,
10908  uint32_t currentFrameIndex,
10909  VkDeviceSize size,
10910  VkDeviceSize alignment,
10911  VmaAllocationCreateFlags allocFlags,
10912  void* pUserData,
10913  VmaSuballocationType suballocType,
10914  uint32_t strategy,
10915  VmaAllocation* pAllocation)
10916 {
10917  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
10918  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10919  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10920  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10921 
10922  VmaAllocationRequest currRequest = {};
10923  if(pBlock->m_pMetadata->CreateAllocationRequest(
10924  currentFrameIndex,
10925  m_FrameInUseCount,
10926  m_BufferImageGranularity,
10927  size,
10928  alignment,
10929  isUpperAddress,
10930  suballocType,
10931  false, // canMakeOtherLost
10932  strategy,
10933  &currRequest))
10934  {
10935  // Allocate from pCurrBlock.
10936  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
10937 
10938  if(mapped)
10939  {
10940  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
10941  if(res != VK_SUCCESS)
10942  {
10943  return res;
10944  }
10945  }
10946 
10947  // We no longer have an empty block.
10948  if(pBlock->m_pMetadata->IsEmpty())
10949  {
10950  m_HasEmptyBlock = false;
10951  }
10952 
10953  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10954  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
10955  (*pAllocation)->InitBlockAllocation(
10956  hCurrentPool,
10957  pBlock,
10958  currRequest.offset,
10959  alignment,
10960  size,
10961  suballocType,
10962  mapped,
10963  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10964  VMA_HEAVY_ASSERT(pBlock->Validate());
10965  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
10966  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10967  {
10968  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10969  }
10970  if(IsCorruptionDetectionEnabled())
10971  {
10972  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
10973  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10974  }
10975  return VK_SUCCESS;
10976  }
10977  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10978 }
10979 
10980 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10981 {
10982  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10983  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10984  allocInfo.allocationSize = blockSize;
10985  VkDeviceMemory mem = VK_NULL_HANDLE;
10986  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10987  if(res < 0)
10988  {
10989  return res;
10990  }
10991 
10992  // New VkDeviceMemory successfully created.
10993 
10994  // Create a new block object for it.
10995  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10996  pBlock->Init(
10997  m_hAllocator,
10998  m_MemoryTypeIndex,
10999  mem,
11000  allocInfo.allocationSize,
11001  m_NextBlockId++,
11002  m_Algorithm);
11003 
11004  m_Blocks.push_back(pBlock);
11005  if(pNewBlockIndex != VMA_NULL)
11006  {
11007  *pNewBlockIndex = m_Blocks.size() - 1;
11008  }
11009 
11010  return VK_SUCCESS;
11011 }
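/*
Note on sizing: when the block size is not explicit, Allocate() above shrinks the
first blocks of a vector. With the default preferred block size of 256 MiB, a small
first allocation gets a 32 MiB block (1/8); each halving requires the smaller size
to still exceed every existing block and be at least twice the requested size, and a
failed vkAllocateMemory triggers further halvings, with at most three halvings in
total. Illustrative numbers only - the preferred size actually derives from
VmaAllocatorCreateInfo::preferredLargeHeapBlockSize and the heap size.
*/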
11012 
11013 #if VMA_STATS_STRING_ENABLED
11014 
11015 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
11016 {
11017  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11018 
11019  json.BeginObject();
11020 
11021  if(m_IsCustomPool)
11022  {
11023  json.WriteString("MemoryTypeIndex");
11024  json.WriteNumber(m_MemoryTypeIndex);
11025 
11026  json.WriteString("BlockSize");
11027  json.WriteNumber(m_PreferredBlockSize);
11028 
11029  json.WriteString("BlockCount");
11030  json.BeginObject(true);
11031  if(m_MinBlockCount > 0)
11032  {
11033  json.WriteString("Min");
11034  json.WriteNumber((uint64_t)m_MinBlockCount);
11035  }
11036  if(m_MaxBlockCount < SIZE_MAX)
11037  {
11038  json.WriteString("Max");
11039  json.WriteNumber((uint64_t)m_MaxBlockCount);
11040  }
11041  json.WriteString("Cur");
11042  json.WriteNumber((uint64_t)m_Blocks.size());
11043  json.EndObject();
11044 
11045  if(m_FrameInUseCount > 0)
11046  {
11047  json.WriteString("FrameInUseCount");
11048  json.WriteNumber(m_FrameInUseCount);
11049  }
11050 
11051  if(m_Algorithm != 0)
11052  {
11053  json.WriteString("Algorithm");
11054  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
11055  }
11056  }
11057  else
11058  {
11059  json.WriteString("PreferredBlockSize");
11060  json.WriteNumber(m_PreferredBlockSize);
11061  }
11062 
11063  json.WriteString("Blocks");
11064  json.BeginObject();
11065  for(size_t i = 0; i < m_Blocks.size(); ++i)
11066  {
11067  json.BeginString();
11068  json.ContinueString(m_Blocks[i]->GetId());
11069  json.EndString();
11070 
11071  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
11072  }
11073  json.EndObject();
11074 
11075  json.EndObject();
11076 }
11077 
11078 #endif // #if VMA_STATS_STRING_ENABLED
11079 
11080 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
11081  VmaAllocator hAllocator,
11082  uint32_t currentFrameIndex)
11083 {
11084  if(m_pDefragmentator == VMA_NULL)
11085  {
11086  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
11087  hAllocator,
11088  this,
11089  currentFrameIndex);
11090  }
11091 
11092  return m_pDefragmentator;
11093 }
11094 
11095 VkResult VmaBlockVector::Defragment(
11096  VmaDefragmentationStats* pDefragmentationStats,
11097  VkDeviceSize& maxBytesToMove,
11098  uint32_t& maxAllocationsToMove)
11099 {
11100  if(m_pDefragmentator == VMA_NULL)
11101  {
11102  return VK_SUCCESS;
11103  }
11104 
11105  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11106 
11107  // Defragment.
11108  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
11109 
11110  // Accumulate statistics.
11111  if(pDefragmentationStats != VMA_NULL)
11112  {
11113  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
11114  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
11115  pDefragmentationStats->bytesMoved += bytesMoved;
11116  pDefragmentationStats->allocationsMoved += allocationsMoved;
11117  VMA_ASSERT(bytesMoved <= maxBytesToMove);
11118  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
11119  maxBytesToMove -= bytesMoved;
11120  maxAllocationsToMove -= allocationsMoved;
11121  }
11122 
11123  // Free empty blocks.
11124  m_HasEmptyBlock = false;
11125  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11126  {
11127  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11128  if(pBlock->m_pMetadata->IsEmpty())
11129  {
11130  if(m_Blocks.size() > m_MinBlockCount)
11131  {
11132  if(pDefragmentationStats != VMA_NULL)
11133  {
11134  ++pDefragmentationStats->deviceMemoryBlocksFreed;
11135  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
11136  }
11137 
11138  VmaVectorRemove(m_Blocks, blockIndex);
11139  pBlock->Destroy(m_hAllocator);
11140  vma_delete(m_hAllocator, pBlock);
11141  }
11142  else
11143  {
11144  m_HasEmptyBlock = true;
11145  }
11146  }
11147  }
11148 
11149  return result;
11150 }
11151 
11152 void VmaBlockVector::DestroyDefragmentator()
11153 {
11154  if(m_pDefragmentator != VMA_NULL)
11155  {
11156  vma_delete(m_hAllocator, m_pDefragmentator);
11157  m_pDefragmentator = VMA_NULL;
11158  }
11159 }
11160 
11161 void VmaBlockVector::MakePoolAllocationsLost(
11162  uint32_t currentFrameIndex,
11163  size_t* pLostAllocationCount)
11164 {
11165  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11166  size_t lostAllocationCount = 0;
11167  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11168  {
11169  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11170  VMA_ASSERT(pBlock);
11171  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
11172  }
11173  if(pLostAllocationCount != VMA_NULL)
11174  {
11175  *pLostAllocationCount = lostAllocationCount;
11176  }
11177 }
11178 
11179 VkResult VmaBlockVector::CheckCorruption()
11180 {
11181  if(!IsCorruptionDetectionEnabled())
11182  {
11183  return VK_ERROR_FEATURE_NOT_PRESENT;
11184  }
11185 
11186  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11187  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11188  {
11189  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11190  VMA_ASSERT(pBlock);
11191  VkResult res = pBlock->CheckCorruption(m_hAllocator);
11192  if(res != VK_SUCCESS)
11193  {
11194  return res;
11195  }
11196  }
11197  return VK_SUCCESS;
11198 }
11199 
11200 void VmaBlockVector::AddStats(VmaStats* pStats)
11201 {
11202  const uint32_t memTypeIndex = m_MemoryTypeIndex;
11203  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11204 
11205  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11206 
11207  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11208  {
11209  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11210  VMA_ASSERT(pBlock);
11211  VMA_HEAVY_ASSERT(pBlock->Validate());
11212  VmaStatInfo allocationStatInfo;
11213  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11214  VmaAddStatInfo(pStats->total, allocationStatInfo);
11215  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11216  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11217  }
11218 }
11219 
11220 ////////////////////////////////////////////////////////////////////////////////
11221 // VmaDefragmentator members definition
11222 
11223 VmaDefragmentator::VmaDefragmentator(
11224  VmaAllocator hAllocator,
11225  VmaBlockVector* pBlockVector,
11226  uint32_t currentFrameIndex) :
11227  m_hAllocator(hAllocator),
11228  m_pBlockVector(pBlockVector),
11229  m_CurrentFrameIndex(currentFrameIndex),
11230  m_BytesMoved(0),
11231  m_AllocationsMoved(0),
11232  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
11233  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
11234 {
11235  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
11236 }
11237 
11238 VmaDefragmentator::~VmaDefragmentator()
11239 {
11240  for(size_t i = m_Blocks.size(); i--; )
11241  {
11242  vma_delete(m_hAllocator, m_Blocks[i]);
11243  }
11244 }
11245 
11246 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11247 {
11248  AllocationInfo allocInfo;
11249  allocInfo.m_hAllocation = hAlloc;
11250  allocInfo.m_pChanged = pChanged;
11251  m_Allocations.push_back(allocInfo);
11252 }
11253 
11254 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11255 {
11256  // It has already been mapped for defragmentation.
11257  if(m_pMappedDataForDefragmentation)
11258  {
11259  *ppMappedData = m_pMappedDataForDefragmentation;
11260  return VK_SUCCESS;
11261  }
11262 
11263  // It is originally mapped.
11264  if(m_pBlock->GetMappedData())
11265  {
11266  *ppMappedData = m_pBlock->GetMappedData();
11267  return VK_SUCCESS;
11268  }
11269 
11270  // Map on first usage.
11271  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11272  *ppMappedData = m_pMappedDataForDefragmentation;
11273  return res;
11274 }
11275 
11276 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11277 {
11278  if(m_pMappedDataForDefragmentation != VMA_NULL)
11279  {
11280  m_pBlock->Unmap(hAllocator, 1);
11281  }
11282 }
11283 
11284 VkResult VmaDefragmentator::DefragmentRound(
11285  VkDeviceSize maxBytesToMove,
11286  uint32_t maxAllocationsToMove)
11287 {
11288  if(m_Blocks.empty())
11289  {
11290  return VK_SUCCESS;
11291  }
11292 
11293  size_t srcBlockIndex = m_Blocks.size() - 1;
11294  size_t srcAllocIndex = SIZE_MAX;
11295  for(;;)
11296  {
11297  // 1. Find next allocation to move.
11298  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11299  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11300  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11301  {
11302  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11303  {
11304  // Finished: no more allocations to process.
11305  if(srcBlockIndex == 0)
11306  {
11307  return VK_SUCCESS;
11308  }
11309  else
11310  {
11311  --srcBlockIndex;
11312  srcAllocIndex = SIZE_MAX;
11313  }
11314  }
11315  else
11316  {
11317  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11318  }
11319  }
11320 
11321  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11322  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11323 
11324  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11325  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11326  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11327  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11328 
11329  // 2. Try to find new place for this allocation in preceding or current block.
11330  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11331  {
11332  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11333  VmaAllocationRequest dstAllocRequest;
11334  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11335  m_CurrentFrameIndex,
11336  m_pBlockVector->GetFrameInUseCount(),
11337  m_pBlockVector->GetBufferImageGranularity(),
11338  size,
11339  alignment,
11340  false, // upperAddress
11341  suballocType,
11342  false, // canMakeOtherLost
11343  VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
11344  &dstAllocRequest) &&
11345  MoveMakesSense(
11346  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11347  {
11348  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11349 
11350  // Reached limit on number of allocations or bytes to move.
11351  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11352  (m_BytesMoved + size > maxBytesToMove))
11353  {
11354  return VK_INCOMPLETE;
11355  }
11356 
11357  void* pDstMappedData = VMA_NULL;
11358  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11359  if(res != VK_SUCCESS)
11360  {
11361  return res;
11362  }
11363 
11364  void* pSrcMappedData = VMA_NULL;
11365  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11366  if(res != VK_SUCCESS)
11367  {
11368  return res;
11369  }
11370 
11371  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11372  memcpy(
11373  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11374  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11375  static_cast<size_t>(size));
11376 
11377  if(VMA_DEBUG_MARGIN > 0)
11378  {
11379  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11380  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11381  }
11382 
11383  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11384  dstAllocRequest,
11385  suballocType,
11386  size,
11387  false, // upperAddress
11388  allocInfo.m_hAllocation);
11389  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11390 
11391  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11392 
11393  if(allocInfo.m_pChanged != VMA_NULL)
11394  {
11395  *allocInfo.m_pChanged = VK_TRUE;
11396  }
11397 
11398  ++m_AllocationsMoved;
11399  m_BytesMoved += size;
11400 
11401  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11402 
11403  break;
11404  }
11405  }
11406 
11407  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
11408 
11409  if(srcAllocIndex > 0)
11410  {
11411  --srcAllocIndex;
11412  }
11413  else
11414  {
11415  if(srcBlockIndex > 0)
11416  {
11417  --srcBlockIndex;
11418  srcAllocIndex = SIZE_MAX;
11419  }
11420  else
11421  {
11422  return VK_SUCCESS;
11423  }
11424  }
11425  }
11426 }
11427 
11428 VkResult VmaDefragmentator::Defragment(
11429  VkDeviceSize maxBytesToMove,
11430  uint32_t maxAllocationsToMove)
11431 {
11432  if(m_Allocations.empty())
11433  {
11434  return VK_SUCCESS;
11435  }
11436 
11437  // Create block info for each block.
11438  const size_t blockCount = m_pBlockVector->m_Blocks.size();
11439  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11440  {
11441  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
11442  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
11443  m_Blocks.push_back(pBlockInfo);
11444  }
11445 
11446  // Sort them by m_pBlock pointer value.
11447  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
11448 
11449  // Move allocation infos from m_Allocations into the m_Allocations list of the matching block in m_Blocks.
11450  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
11451  {
11452  AllocationInfo& allocInfo = m_Allocations[allocIndex];
11453  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
11454  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11455  {
11456  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
11457  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
11458  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
11459  {
11460  (*it)->m_Allocations.push_back(allocInfo);
11461  }
11462  else
11463  {
11464  VMA_ASSERT(0);
11465  }
11466  }
11467  }
11468  m_Allocations.clear();
11469 
11470  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11471  {
11472  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
11473  pBlockInfo->CalcHasNonMovableAllocations();
11474  pBlockInfo->SortAllocationsBySizeDescecnding();
11475  }
11476 
11477  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
11478  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
11479 
11480  // Execute defragmentation rounds (the main part).
11481  VkResult result = VK_SUCCESS;
11482  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
11483  {
11484  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
11485  }
11486 
11487  // Unmap blocks that were mapped for defragmentation.
11488  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11489  {
11490  m_Blocks[blockIndex]->Unmap(m_hAllocator);
11491  }
11492 
11493  return result;
11494 }
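/*
This member function sits behind the public vmaDefragment() entry point. A usage
sketch (assuming `allocs`/`allocCount` describe host-visible allocations that are
safe to move, i.e. not currently in use by the GPU):

    VmaDefragmentationStats defragStats = {};
    std::vector<VkBool32> changed(allocCount);
    VkResult res = vmaDefragment(allocator, allocs, allocCount,
        changed.data(),
        VMA_NULL, // null pDefragmentationInfo: default limits on bytes/allocations moved
        &defragStats);
    // For each allocs[i] with changed[i] == VK_TRUE, the buffer or image bound to it
    // must be recreated and rebound at the allocation's new place.
*/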
11495 
11496 bool VmaDefragmentator::MoveMakesSense(
11497  size_t dstBlockIndex, VkDeviceSize dstOffset,
11498  size_t srcBlockIndex, VkDeviceSize srcOffset)
11499 {
11500  if(dstBlockIndex < srcBlockIndex)
11501  {
11502  return true;
11503  }
11504  if(dstBlockIndex > srcBlockIndex)
11505  {
11506  return false;
11507  }
11508  if(dstOffset < srcOffset)
11509  {
11510  return true;
11511  }
11512  return false;
11513 }
11514 
11515 ////////////////////////////////////////////////////////////////////////////////
11516 // VmaRecorder
11517 
11518 #if VMA_RECORDING_ENABLED
11519 
11520 VmaRecorder::VmaRecorder() :
11521  m_UseMutex(true),
11522  m_Flags(0),
11523  m_File(VMA_NULL),
11524  m_Freq(INT64_MAX),
11525  m_StartCounter(INT64_MAX)
11526 {
11527 }
11528 
11529 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11530 {
11531  m_UseMutex = useMutex;
11532  m_Flags = settings.flags;
11533 
11534  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11535  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11536 
11537  // Open file for writing.
11538  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11539  if(err != 0)
11540  {
11541  return VK_ERROR_INITIALIZATION_FAILED;
11542  }
11543 
11544  // Write header.
11545  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11546  fprintf(m_File, "%s\n", "1,4");
11547 
11548  return VK_SUCCESS;
11549 }
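/*
Recording is requested at allocator creation; this Init() then receives the settings.
Sketch (requires VMA_RECORDING_ENABLED == 1, currently Windows-only given the
fopen_s and QueryPerformanceCounter usage above; the file path is hypothetical):

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
    recordSettings.pFilePath = "vma_calls.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... physicalDevice, device etc. ...
    allocatorInfo.pRecordSettings = &recordSettings;
*/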
11550 
11551 VmaRecorder::~VmaRecorder()
11552 {
11553  if(m_File != VMA_NULL)
11554  {
11555  fclose(m_File);
11556  }
11557 }
11558 
11559 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11560 {
11561  CallParams callParams;
11562  GetBasicParams(callParams);
11563 
11564  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11565  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11566  Flush();
11567 }
11568 
11569 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11570 {
11571  CallParams callParams;
11572  GetBasicParams(callParams);
11573 
11574  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11575  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11576  Flush();
11577 }
11578 
11579 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
11580 {
11581  CallParams callParams;
11582  GetBasicParams(callParams);
11583 
11584  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11585  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
11586  createInfo.memoryTypeIndex,
11587  createInfo.flags,
11588  createInfo.blockSize,
11589  (uint64_t)createInfo.minBlockCount,
11590  (uint64_t)createInfo.maxBlockCount,
11591  createInfo.frameInUseCount,
11592  pool);
11593  Flush();
11594 }
11595 
11596 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11597 {
11598  CallParams callParams;
11599  GetBasicParams(callParams);
11600 
11601  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11602  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11603  pool);
11604  Flush();
11605 }
11606 
11607 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11608  const VkMemoryRequirements& vkMemReq,
11609  const VmaAllocationCreateInfo& createInfo,
11610  VmaAllocation allocation)
11611 {
11612  CallParams callParams;
11613  GetBasicParams(callParams);
11614 
11615  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11616  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11617  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11618  vkMemReq.size,
11619  vkMemReq.alignment,
11620  vkMemReq.memoryTypeBits,
11621  createInfo.flags,
11622  createInfo.usage,
11623  createInfo.requiredFlags,
11624  createInfo.preferredFlags,
11625  createInfo.memoryTypeBits,
11626  createInfo.pool,
11627  allocation,
11628  userDataStr.GetString());
11629  Flush();
11630 }
11631 
11632 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11633  const VkMemoryRequirements& vkMemReq,
11634  bool requiresDedicatedAllocation,
11635  bool prefersDedicatedAllocation,
11636  const VmaAllocationCreateInfo& createInfo,
11637  VmaAllocation allocation)
11638 {
11639  CallParams callParams;
11640  GetBasicParams(callParams);
11641 
11642  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11643  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11644  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11645  vkMemReq.size,
11646  vkMemReq.alignment,
11647  vkMemReq.memoryTypeBits,
11648  requiresDedicatedAllocation ? 1 : 0,
11649  prefersDedicatedAllocation ? 1 : 0,
11650  createInfo.flags,
11651  createInfo.usage,
11652  createInfo.requiredFlags,
11653  createInfo.preferredFlags,
11654  createInfo.memoryTypeBits,
11655  createInfo.pool,
11656  allocation,
11657  userDataStr.GetString());
11658  Flush();
11659 }
11660 
11661 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11662  const VkMemoryRequirements& vkMemReq,
11663  bool requiresDedicatedAllocation,
11664  bool prefersDedicatedAllocation,
11665  const VmaAllocationCreateInfo& createInfo,
11666  VmaAllocation allocation)
11667 {
11668  CallParams callParams;
11669  GetBasicParams(callParams);
11670 
11671  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11672  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11673  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11674  vkMemReq.size,
11675  vkMemReq.alignment,
11676  vkMemReq.memoryTypeBits,
11677  requiresDedicatedAllocation ? 1 : 0,
11678  prefersDedicatedAllocation ? 1 : 0,
11679  createInfo.flags,
11680  createInfo.usage,
11681  createInfo.requiredFlags,
11682  createInfo.preferredFlags,
11683  createInfo.memoryTypeBits,
11684  createInfo.pool,
11685  allocation,
11686  userDataStr.GetString());
11687  Flush();
11688 }
11689 
11690 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11691  VmaAllocation allocation)
11692 {
11693  CallParams callParams;
11694  GetBasicParams(callParams);
11695 
11696  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11697  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11698  allocation);
11699  Flush();
11700 }
11701 
11702 void VmaRecorder::RecordResizeAllocation(
11703  uint32_t frameIndex,
11704  VmaAllocation allocation,
11705  VkDeviceSize newSize)
11706 {
11707  CallParams callParams;
11708  GetBasicParams(callParams);
11709 
11710  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11711  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
11712  allocation, newSize);
11713  Flush();
11714 }
11715 
11716 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11717  VmaAllocation allocation,
11718  const void* pUserData)
11719 {
11720  CallParams callParams;
11721  GetBasicParams(callParams);
11722 
11723  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11724  UserDataString userDataStr(
11725  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11726  pUserData);
11727  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11728  allocation,
11729  userDataStr.GetString());
11730  Flush();
11731 }
11732 
11733 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11734  VmaAllocation allocation)
11735 {
11736  CallParams callParams;
11737  GetBasicParams(callParams);
11738 
11739  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11740  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11741  allocation);
11742  Flush();
11743 }
11744 
11745 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11746  VmaAllocation allocation)
11747 {
11748  CallParams callParams;
11749  GetBasicParams(callParams);
11750 
11751  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11752  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11753  allocation);
11754  Flush();
11755 }
11756 
11757 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11758  VmaAllocation allocation)
11759 {
11760  CallParams callParams;
11761  GetBasicParams(callParams);
11762 
11763  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11764  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11765  allocation);
11766  Flush();
11767 }
11768 
11769 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11770  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11771 {
11772  CallParams callParams;
11773  GetBasicParams(callParams);
11774 
11775  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11776  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11777  allocation,
11778  offset,
11779  size);
11780  Flush();
11781 }
11782 
11783 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11784  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11785 {
11786  CallParams callParams;
11787  GetBasicParams(callParams);
11788 
11789  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11790  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11791  allocation,
11792  offset,
11793  size);
11794  Flush();
11795 }
11796 
11797 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11798  const VkBufferCreateInfo& bufCreateInfo,
11799  const VmaAllocationCreateInfo& allocCreateInfo,
11800  VmaAllocation allocation)
11801 {
11802  CallParams callParams;
11803  GetBasicParams(callParams);
11804 
11805  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11806  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11807  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11808  bufCreateInfo.flags,
11809  bufCreateInfo.size,
11810  bufCreateInfo.usage,
11811  bufCreateInfo.sharingMode,
11812  allocCreateInfo.flags,
11813  allocCreateInfo.usage,
11814  allocCreateInfo.requiredFlags,
11815  allocCreateInfo.preferredFlags,
11816  allocCreateInfo.memoryTypeBits,
11817  allocCreateInfo.pool,
11818  allocation,
11819  userDataStr.GetString());
11820  Flush();
11821 }
11822 
11823 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11824  const VkImageCreateInfo& imageCreateInfo,
11825  const VmaAllocationCreateInfo& allocCreateInfo,
11826  VmaAllocation allocation)
11827 {
11828  CallParams callParams;
11829  GetBasicParams(callParams);
11830 
11831  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11832  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11833  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11834  imageCreateInfo.flags,
11835  imageCreateInfo.imageType,
11836  imageCreateInfo.format,
11837  imageCreateInfo.extent.width,
11838  imageCreateInfo.extent.height,
11839  imageCreateInfo.extent.depth,
11840  imageCreateInfo.mipLevels,
11841  imageCreateInfo.arrayLayers,
11842  imageCreateInfo.samples,
11843  imageCreateInfo.tiling,
11844  imageCreateInfo.usage,
11845  imageCreateInfo.sharingMode,
11846  imageCreateInfo.initialLayout,
11847  allocCreateInfo.flags,
11848  allocCreateInfo.usage,
11849  allocCreateInfo.requiredFlags,
11850  allocCreateInfo.preferredFlags,
11851  allocCreateInfo.memoryTypeBits,
11852  allocCreateInfo.pool,
11853  allocation,
11854  userDataStr.GetString());
11855  Flush();
11856 }
11857 
11858 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11859  VmaAllocation allocation)
11860 {
11861  CallParams callParams;
11862  GetBasicParams(callParams);
11863 
11864  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11865  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11866  allocation);
11867  Flush();
11868 }
11869 
11870 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11871  VmaAllocation allocation)
11872 {
11873  CallParams callParams;
11874  GetBasicParams(callParams);
11875 
11876  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11877  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11878  allocation);
11879  Flush();
11880 }
11881 
11882 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11883  VmaAllocation allocation)
11884 {
11885  CallParams callParams;
11886  GetBasicParams(callParams);
11887 
11888  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11889  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11890  allocation);
11891  Flush();
11892 }
11893 
11894 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11895  VmaAllocation allocation)
11896 {
11897  CallParams callParams;
11898  GetBasicParams(callParams);
11899 
11900  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11901  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11902  allocation);
11903  Flush();
11904 }
11905 
11906 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11907  VmaPool pool)
11908 {
11909  CallParams callParams;
11910  GetBasicParams(callParams);
11911 
11912  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11913  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11914  pool);
11915  Flush();
11916 }
11917 
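// Renders pUserData for the CSV record: as the string itself when
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is set, otherwise as the
// raw pointer value, or as an empty string when null.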
11918 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11919 {
11920  if(pUserData != VMA_NULL)
11921  {
11922  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11923  {
11924  m_Str = (const char*)pUserData;
11925  }
11926  else
11927  {
11928  sprintf_s(m_PtrStr, "%p", pUserData);
11929  m_Str = m_PtrStr;
11930  }
11931  }
11932  else
11933  {
11934  m_Str = "";
11935  }
11936 }
11937 
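// Writes a "Config" block describing the recording environment: physical
// device properties and limits, memory heaps/types, the dedicated-allocation
// extension state, and the VMA debug macro settings, so the environment a
// recording came from is known when it is later inspected or replayed.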
11938 void VmaRecorder::WriteConfiguration(
11939  const VkPhysicalDeviceProperties& devProps,
11940  const VkPhysicalDeviceMemoryProperties& memProps,
11941  bool dedicatedAllocationExtensionEnabled)
11942 {
11943  fprintf(m_File, "Config,Begin\n");
11944 
11945  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
11946  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
11947  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
11948  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
11949  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
11950  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
11951 
11952  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
11953  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
11954  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
11955 
11956  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
11957  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
11958  {
11959  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
11960  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
11961  }
11962  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
11963  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
11964  {
11965  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
11966  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
11967  }
11968 
11969  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
11970 
11971  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
11972  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
11973  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
11974  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
11975  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
11976  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
11977  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
11978  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
11979  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11980 
11981  fprintf(m_File, "Config,End\n");
11982 }
11983 
11984 void VmaRecorder::GetBasicParams(CallParams& outParams)
11985 {
11986  outParams.threadId = GetCurrentThreadId();
11987 
11988  LARGE_INTEGER counter;
11989  QueryPerformanceCounter(&counter);
11990  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11991 }
11992 
11993 void VmaRecorder::Flush()
11994 {
11995  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11996  {
11997  fflush(m_File);
11998  }
11999 }
12000 
12001 #endif // #if VMA_RECORDING_ENABLED
12002 
12003 ////////////////////////////////////////////////////////////////////////////////
12004 // VmaAllocator_T
12005 
12006 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
12007  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
12008  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
12009  m_hDevice(pCreateInfo->device),
12010  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
12011  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
12012  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
12013  m_PreferredLargeHeapBlockSize(0),
12014  m_PhysicalDevice(pCreateInfo->physicalDevice),
12015  m_CurrentFrameIndex(0),
12016  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
12017  m_NextPoolId(0)
12018 #if VMA_RECORDING_ENABLED
12019  ,m_pRecorder(VMA_NULL)
12020 #endif
12021 {
12022  if(VMA_DEBUG_DETECT_CORRUPTION)
12023  {
12024  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
12025  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
12026  }
12027 
12028  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
12029 
12030 #if !(VMA_DEDICATED_ALLOCATION)
12031  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
12032  {
12033  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
12034  }
12035 #endif
12036 
12037  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
12038  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
12039  memset(&m_MemProps, 0, sizeof(m_MemProps));
12040 
12041  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
12042  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
12043 
12044  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12045  {
12046  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
12047  }
12048 
12049  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
12050  {
12051  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
12052  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
12053  }
12054 
12055  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
12056 
12057  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
12058  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
12059 
12060  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
12061  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
12062  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
12063  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
12064 
12065  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
12066  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
12067 
12068  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
12069  {
12070  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
12071  {
12072  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
12073  if(limit != VK_WHOLE_SIZE)
12074  {
12075  m_HeapSizeLimit[heapIndex] = limit;
12076  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
12077  {
12078  m_MemProps.memoryHeaps[heapIndex].size = limit;
12079  }
12080  }
12081  }
12082  }
12083 
12084  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12085  {
12086  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
12087 
12088  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
12089  this,
12090  memTypeIndex,
12091  preferredBlockSize,
12092  0,
12093  SIZE_MAX,
12094  GetBufferImageGranularity(),
12095  pCreateInfo->frameInUseCount,
12096  false, // isCustomPool
12097  false, // explicitBlockSize
12098  false); // linearAlgorithm
12099  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
12100  // because minBlockCount is 0.
12101  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
12102 
12103  }
12104 }
12105 
12106 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
12107 {
12108  VkResult res = VK_SUCCESS;
12109 
12110  if(pCreateInfo->pRecordSettings != VMA_NULL &&
12111  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
12112  {
12113 #if VMA_RECORDING_ENABLED
12114  m_pRecorder = vma_new(this, VmaRecorder)();
12115  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
12116  if(res != VK_SUCCESS)
12117  {
12118  return res;
12119  }
12120  m_pRecorder->WriteConfiguration(
12121  m_PhysicalDeviceProperties,
12122  m_MemProps,
12123  m_UseKhrDedicatedAllocation);
12124  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
12125 #else
12126  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
12127  return VK_ERROR_FEATURE_NOT_PRESENT;
12128 #endif
12129  }
12130 
12131  return res;
12132 }
12133 
12134 VmaAllocator_T::~VmaAllocator_T()
12135 {
12136 #if VMA_RECORDING_ENABLED
12137  if(m_pRecorder != VMA_NULL)
12138  {
12139  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
12140  vma_delete(this, m_pRecorder);
12141  }
12142 #endif
12143 
12144  VMA_ASSERT(m_Pools.empty());
12145 
12146  for(size_t i = GetMemoryTypeCount(); i--; )
12147  {
12148  vma_delete(this, m_pDedicatedAllocations[i]);
12149  vma_delete(this, m_pBlockVectors[i]);
12150  }
12151 }
12152 
12153 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
12154 {
12155 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12156  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
12157  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
12158  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
12159  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
12160  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
12161  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
12162  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
12163  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
12164  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
12165  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
12166  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
12167  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
12168  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
12169  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
12170  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
12171  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
12172 #if VMA_DEDICATED_ALLOCATION
12173  if(m_UseKhrDedicatedAllocation)
12174  {
12175  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
12176  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
12177  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
12178  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
12179  }
12180 #endif // #if VMA_DEDICATED_ALLOCATION
12181 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12182 
12183 #define VMA_COPY_IF_NOT_NULL(funcName) \
12184  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
12185 
12186  if(pVulkanFunctions != VMA_NULL)
12187  {
12188  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
12189  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
12190  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
12191  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
12192  VMA_COPY_IF_NOT_NULL(vkMapMemory);
12193  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
12194  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
12195  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
12196  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
12197  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
12198  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
12199  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
12200  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
12201  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
12202  VMA_COPY_IF_NOT_NULL(vkCreateImage);
12203  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
12204 #if VMA_DEDICATED_ALLOCATION
12205  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
12206  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
12207 #endif
12208  }
12209 
12210 #undef VMA_COPY_IF_NOT_NULL
12211 
12212  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
12213  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
12214  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
12215  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
12216  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
12217  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
12218  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
12219  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
12220  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
12221  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
12222  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
12223  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
12224  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
12225  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
12226  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
12227  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
12228  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
12229  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
12230 #if VMA_DEDICATED_ALLOCATION
12231  if(m_UseKhrDedicatedAllocation)
12232  {
12233  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
12234  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
12235  }
12236 #endif
12237 }
12238 
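// Heuristic: heaps no larger than VMA_SMALL_HEAP_MAX_SIZE get a preferred
// block size of 1/8 of the heap size, so even small heaps can hold several
// blocks; larger heaps use m_PreferredLargeHeapBlockSize.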
12239 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12240 {
12241  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12242  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12243  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12244  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12245 }
12246 
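// Allocation strategy for a single memory type: first decide whether a
// dedicated VkDeviceMemory is preferable (always-dedicated debug mode, caller
// request, or size above half the preferred block size), otherwise
// sub-allocate from the type's block vector and fall back to dedicated
// memory only if that fails.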
12247 VkResult VmaAllocator_T::AllocateMemoryOfType(
12248  VkDeviceSize size,
12249  VkDeviceSize alignment,
12250  bool dedicatedAllocation,
12251  VkBuffer dedicatedBuffer,
12252  VkImage dedicatedImage,
12253  const VmaAllocationCreateInfo& createInfo,
12254  uint32_t memTypeIndex,
12255  VmaSuballocationType suballocType,
12256  VmaAllocation* pAllocation)
12257 {
12258  VMA_ASSERT(pAllocation != VMA_NULL);
11612  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
12260 
12261  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12262 
12263  // If memory type is not HOST_VISIBLE, disable MAPPED.
12264  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12265  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12266  {
12267  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12268  }
12269 
12270  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12271  VMA_ASSERT(blockVector);
12272 
12273  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12274  bool preferDedicatedMemory =
12275  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12276  dedicatedAllocation ||
12277  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
12278  size > preferredBlockSize / 2;
12279 
12280  if(preferDedicatedMemory &&
12281  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12282  finalCreateInfo.pool == VK_NULL_HANDLE)
12283  {
12284  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
12285  }
12286 
12287  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12288  {
12289  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12290  {
12291  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12292  }
12293  else
12294  {
12295  return AllocateDedicatedMemory(
12296  size,
12297  suballocType,
12298  memTypeIndex,
12299  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12300  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12301  finalCreateInfo.pUserData,
12302  dedicatedBuffer,
12303  dedicatedImage,
12304  pAllocation);
12305  }
12306  }
12307  else
12308  {
12309  VkResult res = blockVector->Allocate(
12310  VK_NULL_HANDLE, // hCurrentPool
12311  m_CurrentFrameIndex.load(),
12312  size,
12313  alignment,
12314  finalCreateInfo,
12315  suballocType,
12316  pAllocation);
12317  if(res == VK_SUCCESS)
12318  {
12319  return res;
12320  }
12321 
12322  // Try dedicated memory.
12323  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12324  {
12325  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12326  }
12327  else
12328  {
12329  res = AllocateDedicatedMemory(
12330  size,
12331  suballocType,
12332  memTypeIndex,
12333  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12334  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12335  finalCreateInfo.pUserData,
12336  dedicatedBuffer,
12337  dedicatedImage,
12338  pAllocation);
12339  if(res == VK_SUCCESS)
12340  {
12341  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
12342  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12343  return VK_SUCCESS;
12344  }
12345  else
12346  {
12347  // Everything failed: Return error code.
12348  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12349  return res;
12350  }
12351  }
12352  }
12353 }
12354 
12355 VkResult VmaAllocator_T::AllocateDedicatedMemory(
12356  VkDeviceSize size,
12357  VmaSuballocationType suballocType,
12358  uint32_t memTypeIndex,
12359  bool map,
12360  bool isUserDataString,
12361  void* pUserData,
12362  VkBuffer dedicatedBuffer,
12363  VkImage dedicatedImage,
12364  VmaAllocation* pAllocation)
12365 {
12366  VMA_ASSERT(pAllocation);
12367 
12368  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12369  allocInfo.memoryTypeIndex = memTypeIndex;
12370  allocInfo.allocationSize = size;
12371 
12372 #if VMA_DEDICATED_ALLOCATION
12373  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
12374  if(m_UseKhrDedicatedAllocation)
12375  {
12376  if(dedicatedBuffer != VK_NULL_HANDLE)
12377  {
12378  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
12379  dedicatedAllocInfo.buffer = dedicatedBuffer;
12380  allocInfo.pNext = &dedicatedAllocInfo;
12381  }
12382  else if(dedicatedImage != VK_NULL_HANDLE)
12383  {
12384  dedicatedAllocInfo.image = dedicatedImage;
12385  allocInfo.pNext = &dedicatedAllocInfo;
12386  }
12387  }
12388 #endif // #if VMA_DEDICATED_ALLOCATION
12389 
12390  // Allocate VkDeviceMemory.
12391  VkDeviceMemory hMemory = VK_NULL_HANDLE;
12392  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
12393  if(res < 0)
12394  {
12395  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12396  return res;
12397  }
12398 
12399  void* pMappedData = VMA_NULL;
12400  if(map)
12401  {
12402  res = (*m_VulkanFunctions.vkMapMemory)(
12403  m_hDevice,
12404  hMemory,
12405  0,
12406  VK_WHOLE_SIZE,
12407  0,
12408  &pMappedData);
12409  if(res < 0)
12410  {
12411  VMA_DEBUG_LOG(" vkMapMemory FAILED");
12412  FreeVulkanMemory(memTypeIndex, size, hMemory);
12413  return res;
12414  }
12415  }
12416 
12417  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
12418  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
12419  (*pAllocation)->SetUserData(this, pUserData);
12420  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12421  {
12422  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12423  }
12424 
12425  // Register it in m_pDedicatedAllocations.
12426  {
12427  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12428  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12429  VMA_ASSERT(pDedicatedAllocations);
12430  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
12431  }
12432 
12433  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
12434 
12435  return VK_SUCCESS;
12436 }
12437 
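// When VK_KHR_dedicated_allocation is in use, memory requirements are queried
// through vkGetBufferMemoryRequirements2KHR with a chained
// VkMemoryDedicatedRequirementsKHR, which also reports whether the buffer
// requires or merely prefers its own dedicated allocation.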
12438 void VmaAllocator_T::GetBufferMemoryRequirements(
12439  VkBuffer hBuffer,
12440  VkMemoryRequirements& memReq,
12441  bool& requiresDedicatedAllocation,
12442  bool& prefersDedicatedAllocation) const
12443 {
12444 #if VMA_DEDICATED_ALLOCATION
12445  if(m_UseKhrDedicatedAllocation)
12446  {
12447  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
12448  memReqInfo.buffer = hBuffer;
12449 
12450  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12451 
12452  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12453  memReq2.pNext = &memDedicatedReq;
12454 
12455  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12456 
12457  memReq = memReq2.memoryRequirements;
12458  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12459  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12460  }
12461  else
12462 #endif // #if VMA_DEDICATED_ALLOCATION
12463  {
12464  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
12465  requiresDedicatedAllocation = false;
12466  prefersDedicatedAllocation = false;
12467  }
12468 }
12469 
12470 void VmaAllocator_T::GetImageMemoryRequirements(
12471  VkImage hImage,
12472  VkMemoryRequirements& memReq,
12473  bool& requiresDedicatedAllocation,
12474  bool& prefersDedicatedAllocation) const
12475 {
12476 #if VMA_DEDICATED_ALLOCATION
12477  if(m_UseKhrDedicatedAllocation)
12478  {
12479  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
12480  memReqInfo.image = hImage;
12481 
12482  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12483 
12484  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12485  memReq2.pNext = &memDedicatedReq;
12486 
12487  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12488 
12489  memReq = memReq2.memoryRequirements;
12490  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12491  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12492  }
12493  else
12494 #endif // #if VMA_DEDICATED_ALLOCATION
12495  {
12496  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
12497  requiresDedicatedAllocation = false;
12498  prefersDedicatedAllocation = false;
12499  }
12500 }
12501 
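// Entry point for all allocations: validates mutually exclusive flags, routes
// pool allocations to the pool's block vector, and otherwise tries compatible
// memory types in order of preference, removing a type from the candidate
// mask and retrying whenever allocation from it fails.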
12502 VkResult VmaAllocator_T::AllocateMemory(
12503  const VkMemoryRequirements& vkMemReq,
12504  bool requiresDedicatedAllocation,
12505  bool prefersDedicatedAllocation,
12506  VkBuffer dedicatedBuffer,
12507  VkImage dedicatedImage,
12508  const VmaAllocationCreateInfo& createInfo,
12509  VmaSuballocationType suballocType,
12510  VmaAllocation* pAllocation)
12511 {
12512  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12513 
12514  if(vkMemReq.size == 0)
12515  {
12516  return VK_ERROR_VALIDATION_FAILED_EXT;
12517  }
12518  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12519  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12520  {
12521  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12522  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12523  }
12524  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12525  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
12526  {
12527  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12528  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12529  }
12530  if(requiresDedicatedAllocation)
12531  {
12532  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12533  {
12534  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12535  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12536  }
12537  if(createInfo.pool != VK_NULL_HANDLE)
12538  {
12539  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12540  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12541  }
12542  }
12543  if((createInfo.pool != VK_NULL_HANDLE) &&
12544  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12545  {
12546  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12547  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12548  }
12549 
12550  if(createInfo.pool != VK_NULL_HANDLE)
12551  {
12552  const VkDeviceSize alignmentForPool = VMA_MAX(
12553  vkMemReq.alignment,
12554  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12555  return createInfo.pool->m_BlockVector.Allocate(
12556  createInfo.pool,
12557  m_CurrentFrameIndex.load(),
12558  vkMemReq.size,
12559  alignmentForPool,
12560  createInfo,
12561  suballocType,
12562  pAllocation);
12563  }
12564  else
12565  {
12566  // Bit mask of Vulkan memory types acceptable for this allocation.
12567  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12568  uint32_t memTypeIndex = UINT32_MAX;
12569  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12570  if(res == VK_SUCCESS)
12571  {
12572  VkDeviceSize alignmentForMemType = VMA_MAX(
12573  vkMemReq.alignment,
12574  GetMemoryTypeMinAlignment(memTypeIndex));
12575 
12576  res = AllocateMemoryOfType(
12577  vkMemReq.size,
12578  alignmentForMemType,
12579  requiresDedicatedAllocation || prefersDedicatedAllocation,
12580  dedicatedBuffer,
12581  dedicatedImage,
12582  createInfo,
12583  memTypeIndex,
12584  suballocType,
12585  pAllocation);
12586  // Succeeded on first try.
12587  if(res == VK_SUCCESS)
12588  {
12589  return res;
12590  }
12591  // Allocation from this memory type failed. Try other compatible memory types.
12592  else
12593  {
12594  for(;;)
12595  {
12596  // Remove old memTypeIndex from list of possibilities.
12597  memoryTypeBits &= ~(1u << memTypeIndex);
12598  // Find alternative memTypeIndex.
12599  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12600  if(res == VK_SUCCESS)
12601  {
12602  alignmentForMemType = VMA_MAX(
12603  vkMemReq.alignment,
12604  GetMemoryTypeMinAlignment(memTypeIndex));
12605 
12606  res = AllocateMemoryOfType(
12607  vkMemReq.size,
12608  alignmentForMemType,
12609  requiresDedicatedAllocation || prefersDedicatedAllocation,
12610  dedicatedBuffer,
12611  dedicatedImage,
12612  createInfo,
12613  memTypeIndex,
12614  suballocType,
12615  pAllocation);
12616  // Allocation from this alternative memory type succeeded.
12617  if(res == VK_SUCCESS)
12618  {
12619  return res;
12620  }
12621  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12622  }
12623  // No other matching memory type index could be found.
12624  else
12625  {
12626  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12627  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12628  }
12629  }
12630  }
12631  }
12632  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12633  else
12634  return res;
12635  }
12636 }
12637 
12638 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
12639 {
12640  VMA_ASSERT(allocation);
12641 
12642  if(TouchAllocation(allocation))
12643  {
12644  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12645  {
12646  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12647  }
12648 
12649  switch(allocation->GetType())
12650  {
12651  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12652  {
12653  VmaBlockVector* pBlockVector = VMA_NULL;
12654  VmaPool hPool = allocation->GetPool();
12655  if(hPool != VK_NULL_HANDLE)
12656  {
12657  pBlockVector = &hPool->m_BlockVector;
12658  }
12659  else
12660  {
12661  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12662  pBlockVector = m_pBlockVectors[memTypeIndex];
12663  }
12664  pBlockVector->Free(allocation);
12665  }
12666  break;
12667  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12668  FreeDedicatedMemory(allocation);
12669  break;
12670  default:
12671  VMA_ASSERT(0);
12672  }
12673  }
12674 
12675  allocation->SetUserData(this, VMA_NULL);
12676  vma_delete(this, allocation);
12677 }
12678 
12679 VkResult VmaAllocator_T::ResizeAllocation(
12680  const VmaAllocation alloc,
12681  VkDeviceSize newSize)
12682 {
12683  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
12684  {
12685  return VK_ERROR_VALIDATION_FAILED_EXT;
12686  }
12687  if(newSize == alloc->GetSize())
12688  {
12689  return VK_SUCCESS;
12690  }
12691 
12692  switch(alloc->GetType())
12693  {
12694  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12695  return VK_ERROR_FEATURE_NOT_PRESENT;
12696  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12697  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
12698  {
12699  alloc->ChangeSize(newSize);
12700  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
12701  return VK_SUCCESS;
12702  }
12703  else
12704  {
12705  return VK_ERROR_OUT_OF_POOL_MEMORY;
12706  }
12707  default:
12708  VMA_ASSERT(0);
12709  return VK_ERROR_VALIDATION_FAILED_EXT;
12710  }
12711 }
12712 
12713 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12714 {
12715  // Initialize.
12716  InitStatInfo(pStats->total);
12717  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12718  InitStatInfo(pStats->memoryType[i]);
12719  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12720  InitStatInfo(pStats->memoryHeap[i]);
12721 
12722  // Process default pools.
12723  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12724  {
12725  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12726  VMA_ASSERT(pBlockVector);
12727  pBlockVector->AddStats(pStats);
12728  }
12729 
12730  // Process custom pools.
12731  {
12732  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12733  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12734  {
12735  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12736  }
12737  }
12738 
12739  // Process dedicated allocations.
12740  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12741  {
12742  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12743  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12744  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12745  VMA_ASSERT(pDedicatedAllocVector);
12746  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12747  {
12748  VmaStatInfo allocationStatInfo;
12749  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12750  VmaAddStatInfo(pStats->total, allocationStatInfo);
12751  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12752  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12753  }
12754  }
12755 
12756  // Postprocess.
12757  VmaPostprocessCalcStatInfo(pStats->total);
12758  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12759  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12760  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12761  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12762 }
12763 
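// 4098 == 0x1002, the PCI vendor ID of AMD.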
12764 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12765 
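// Defragmentation runs in three phases: allocations eligible for moving
// (block-based, HOST_VISIBLE | HOST_COHERENT, not lost) are dispatched to
// per-block-vector defragmentators, each vector is then defragmented within
// the byte/move budgets, and finally all defragmentators are destroyed.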
12766 VkResult VmaAllocator_T::Defragment(
12767  VmaAllocation* pAllocations,
12768  size_t allocationCount,
12769  VkBool32* pAllocationsChanged,
12770  const VmaDefragmentationInfo* pDefragmentationInfo,
12771  VmaDefragmentationStats* pDefragmentationStats)
12772 {
12773  if(pAllocationsChanged != VMA_NULL)
12774  {
12775  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
12776  }
12777  if(pDefragmentationStats != VMA_NULL)
12778  {
12779  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12780  }
12781 
12782  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12783 
12784  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12785 
12786  const size_t poolCount = m_Pools.size();
12787 
12788  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12789  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12790  {
12791  VmaAllocation hAlloc = pAllocations[allocIndex];
12792  VMA_ASSERT(hAlloc);
12793  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12794  // DedicatedAlloc cannot be defragmented.
12795  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12796  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12797  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12798  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12799  // Lost allocation cannot be defragmented.
12800  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12801  {
12802  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12803 
12804  const VmaPool hAllocPool = hAlloc->GetPool();
12805  // This allocation belongs to custom pool.
12806  if(hAllocPool != VK_NULL_HANDLE)
12807  {
12808  // Pools with linear or buddy algorithm are not defragmented.
12809  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12810  {
12811  pAllocBlockVector = &hAllocPool->m_BlockVector;
12812  }
12813  }
12814  // This allocation belongs to general pool.
12815  else
12816  {
12817  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12818  }
12819 
12820  if(pAllocBlockVector != VMA_NULL)
12821  {
12822  VmaDefragmentator* const pDefragmentator =
12823  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12824  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12825  &pAllocationsChanged[allocIndex] : VMA_NULL;
12826  pDefragmentator->AddAllocation(hAlloc, pChanged);
12827  }
12828  }
12829  }
12830 
12831  VkResult result = VK_SUCCESS;
12832 
12833  // ======== Main processing.
12834 
12835  VkDeviceSize maxBytesToMove = SIZE_MAX;
12836  uint32_t maxAllocationsToMove = UINT32_MAX;
12837  if(pDefragmentationInfo != VMA_NULL)
12838  {
12839  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12840  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12841  }
12842 
12843  // Process standard memory.
12844  for(uint32_t memTypeIndex = 0;
12845  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12846  ++memTypeIndex)
12847  {
12848  // Only HOST_VISIBLE memory types can be defragmented.
12849  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12850  {
12851  result = m_pBlockVectors[memTypeIndex]->Defragment(
12852  pDefragmentationStats,
12853  maxBytesToMove,
12854  maxAllocationsToMove);
12855  }
12856  }
12857 
12858  // Process custom pools.
12859  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12860  {
12861  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12862  pDefragmentationStats,
12863  maxBytesToMove,
12864  maxAllocationsToMove);
12865  }
12866 
12867  // ======== Destroy defragmentators.
12868 
12869  // Process custom pools.
12870  for(size_t poolIndex = poolCount; poolIndex--; )
12871  {
12872  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12873  }
12874 
12875  // Process standard memory.
12876  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12877  {
12878  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12879  {
12880  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12881  }
12882  }
12883 
12884  return result;
12885 }
12886 
12887 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
12888 {
12889  if(hAllocation->CanBecomeLost())
12890  {
12891  /*
12892  Warning: This is a carefully designed algorithm.
12893  Do not modify unless you really know what you're doing :)
12894  */
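 // The loop below is lock-free: it repeatedly reads the allocation's
 // last-use frame index and, unless the allocation is already lost or was
 // already touched this frame, bumps it to the current frame with a
 // compare-exchange, retrying on contention.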
12895  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12896  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12897  for(;;)
12898  {
12899  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12900  {
12901  pAllocationInfo->memoryType = UINT32_MAX;
12902  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
12903  pAllocationInfo->offset = 0;
12904  pAllocationInfo->size = hAllocation->GetSize();
12905  pAllocationInfo->pMappedData = VMA_NULL;
12906  pAllocationInfo->pUserData = hAllocation->GetUserData();
12907  return;
12908  }
12909  else if(localLastUseFrameIndex == localCurrFrameIndex)
12910  {
12911  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12912  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12913  pAllocationInfo->offset = hAllocation->GetOffset();
12914  pAllocationInfo->size = hAllocation->GetSize();
12915  pAllocationInfo->pMappedData = VMA_NULL;
12916  pAllocationInfo->pUserData = hAllocation->GetUserData();
12917  return;
12918  }
12919  else // Last use time earlier than current time.
12920  {
12921  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12922  {
12923  localLastUseFrameIndex = localCurrFrameIndex;
12924  }
12925  }
12926  }
12927  }
12928  else
12929  {
12930 #if VMA_STATS_STRING_ENABLED
12931  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12932  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12933  for(;;)
12934  {
12935  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12936  if(localLastUseFrameIndex == localCurrFrameIndex)
12937  {
12938  break;
12939  }
12940  else // Last use time earlier than current time.
12941  {
12942  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12943  {
12944  localLastUseFrameIndex = localCurrFrameIndex;
12945  }
12946  }
12947  }
12948 #endif
12949 
12950  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12951  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12952  pAllocationInfo->offset = hAllocation->GetOffset();
12953  pAllocationInfo->size = hAllocation->GetSize();
12954  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
12955  pAllocationInfo->pUserData = hAllocation->GetUserData();
12956  }
12957 }
12958 
12959 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
12960 {
12961  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
12962  if(hAllocation->CanBecomeLost())
12963  {
12964  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12965  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12966  for(;;)
12967  {
12968  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12969  {
12970  return false;
12971  }
12972  else if(localLastUseFrameIndex == localCurrFrameIndex)
12973  {
12974  return true;
12975  }
12976  else // Last use time earlier than current time.
12977  {
12978  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12979  {
12980  localLastUseFrameIndex = localCurrFrameIndex;
12981  }
12982  }
12983  }
12984  }
12985  else
12986  {
12987 #if VMA_STATS_STRING_ENABLED
12988  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12989  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12990  for(;;)
12991  {
12992  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12993  if(localLastUseFrameIndex == localCurrFrameIndex)
12994  {
12995  break;
12996  }
12997  else // Last use time earlier than current time.
12998  {
12999  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
13000  {
13001  localLastUseFrameIndex = localCurrFrameIndex;
13002  }
13003  }
13004  }
13005 #endif
13006 
13007  return true;
13008  }
13009 }
13010 
13011 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
13012 {
13013  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
13014 
13015  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
13016 
13017  if(newCreateInfo.maxBlockCount == 0)
13018  {
13019  newCreateInfo.maxBlockCount = SIZE_MAX;
13020  }
13021  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
13022  {
13023  return VK_ERROR_INITIALIZATION_FAILED;
13024  }
13025 
13026  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
13027 
13028  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
13029 
13030  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
13031  if(res != VK_SUCCESS)
13032  {
13033  vma_delete(this, *pPool);
13034  *pPool = VMA_NULL;
13035  return res;
13036  }
13037 
13038  // Add to m_Pools.
13039  {
13040  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13041  (*pPool)->SetId(m_NextPoolId++);
13042  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
13043  }
13044 
13045  return VK_SUCCESS;
13046 }
13047 
13048 void VmaAllocator_T::DestroyPool(VmaPool pool)
13049 {
13050  // Remove from m_Pools.
13051  {
13052  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13053  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
13054  VMA_ASSERT(success && "Pool not found in Allocator.");
13055  }
13056 
13057  vma_delete(this, pool);
13058 }
13059 
13060 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
13061 {
13062  pool->m_BlockVector.GetPoolStats(pPoolStats);
13063 }
13064 
13065 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
13066 {
13067  m_CurrentFrameIndex.store(frameIndex);
13068 }
13069 
13070 void VmaAllocator_T::MakePoolAllocationsLost(
13071  VmaPool hPool,
13072  size_t* pLostAllocationCount)
13073 {
13074  hPool->m_BlockVector.MakePoolAllocationsLost(
13075  m_CurrentFrameIndex.load(),
13076  pLostAllocationCount);
13077 }
13078 
13079 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
13080 {
13081  return hPool->m_BlockVector.CheckCorruption();
13082 }
13083 
13084 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
13085 {
13086  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
13087 
13088  // Process default pools.
13089  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13090  {
13091  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
13092  {
13093  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
13094  VMA_ASSERT(pBlockVector);
13095  VkResult localRes = pBlockVector->CheckCorruption();
13096  switch(localRes)
13097  {
13098  case VK_ERROR_FEATURE_NOT_PRESENT:
13099  break;
13100  case VK_SUCCESS:
13101  finalRes = VK_SUCCESS;
13102  break;
13103  default:
13104  return localRes;
13105  }
13106  }
13107  }
13108 
13109  // Process custom pools.
13110  {
13111  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13112  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
13113  {
13114  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
13115  {
13116  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
13117  switch(localRes)
13118  {
13119  case VK_ERROR_FEATURE_NOT_PRESENT:
13120  break;
13121  case VK_SUCCESS:
13122  finalRes = VK_SUCCESS;
13123  break;
13124  default:
13125  return localRes;
13126  }
13127  }
13128  }
13129  }
13130 
13131  return finalRes;
13132 }
13133 
13134 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
13135 {
13136  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
13137  (*pAllocation)->InitLost();
13138 }
13139 
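// When the user configured a size limit for this heap, the remaining budget
// is tracked under m_HeapSizeLimitMutex and the call fails with
// VK_ERROR_OUT_OF_DEVICE_MEMORY once the limit would be exceeded. The
// optional pfnAllocate device memory callback is invoked on success.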
13140 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
13141 {
13142  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
13143 
13144  VkResult res;
13145  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13146  {
13147  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13148  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
13149  {
13150  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13151  if(res == VK_SUCCESS)
13152  {
13153  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
13154  }
13155  }
13156  else
13157  {
13158  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
13159  }
13160  }
13161  else
13162  {
13163  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13164  }
13165 
13166  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
13167  {
13168  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
13169  }
13170 
13171  return res;
13172 }
13173 
13174 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
13175 {
13176  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
13177  {
13178  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
13179  }
13180 
13181  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
13182 
13183  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
13184  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13185  {
13186  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13187  m_HeapSizeLimit[heapIndex] += size;
13188  }
13189 }
13190 
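// For block-based allocations, mapping maps the whole VkDeviceMemory block
// with reference counting (the `1` is the number of references to add) and
// returns the block pointer offset by the allocation's own offset.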
13191 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
13192 {
13193  if(hAllocation->CanBecomeLost())
13194  {
13195  return VK_ERROR_MEMORY_MAP_FAILED;
13196  }
13197 
13198  switch(hAllocation->GetType())
13199  {
13200  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13201  {
13202  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13203  char *pBytes = VMA_NULL;
13204  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
13205  if(res == VK_SUCCESS)
13206  {
13207  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
13208  hAllocation->BlockAllocMap();
13209  }
13210  return res;
13211  }
13212  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13213  return hAllocation->DedicatedAllocMap(this, ppData);
13214  default:
13215  VMA_ASSERT(0);
13216  return VK_ERROR_MEMORY_MAP_FAILED;
13217  }
13218 }
13219 
13220 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
13221 {
13222  switch(hAllocation->GetType())
13223  {
13224  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13225  {
13226  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13227  hAllocation->BlockAllocUnmap();
13228  pBlock->Unmap(this, 1);
13229  }
13230  break;
13231  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13232  hAllocation->DedicatedAllocUnmap(this);
13233  break;
13234  default:
13235  VMA_ASSERT(0);
13236  }
13237 }
13238 
13239 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
13240 {
13241  VkResult res = VK_SUCCESS;
13242  switch(hAllocation->GetType())
13243  {
13244  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13245  res = GetVulkanFunctions().vkBindBufferMemory(
13246  m_hDevice,
13247  hBuffer,
13248  hAllocation->GetMemory(),
13249  0); //memoryOffset
13250  break;
13251  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13252  {
13253  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13254  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
13255  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
13256  break;
13257  }
13258  default:
13259  VMA_ASSERT(0);
13260  }
13261  return res;
13262 }
13263 
13264 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
13265 {
13266  VkResult res = VK_SUCCESS;
13267  switch(hAllocation->GetType())
13268  {
13269  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13270  res = GetVulkanFunctions().vkBindImageMemory(
13271  m_hDevice,
13272  hImage,
13273  hAllocation->GetMemory(),
13274  0); //memoryOffset
13275  break;
13276  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13277  {
13278  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13279  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13280  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13281  break;
13282  }
13283  default:
13284  VMA_ASSERT(0);
13285  }
13286  return res;
13287 }
13288 
13289 void VmaAllocator_T::FlushOrInvalidateAllocation(
13290  VmaAllocation hAllocation,
13291  VkDeviceSize offset, VkDeviceSize size,
13292  VMA_CACHE_OPERATION op)
13293 {
13294  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
13295  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
13296  {
13297  const VkDeviceSize allocationSize = hAllocation->GetSize();
13298  VMA_ASSERT(offset <= allocationSize);
13299 
13300  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13301 
13302  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13303  memRange.memory = hAllocation->GetMemory();
13304 
13305  switch(hAllocation->GetType())
13306  {
13307  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13308  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13309  if(size == VK_WHOLE_SIZE)
13310  {
13311  memRange.size = allocationSize - memRange.offset;
13312  }
13313  else
13314  {
13315  VMA_ASSERT(offset + size <= allocationSize);
13316  memRange.size = VMA_MIN(
13317  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
13318  allocationSize - memRange.offset);
13319  }
13320  break;
13321 
13322  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13323  {
13324  // 1. Compute the range within this allocation.
13325  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13326  if(size == VK_WHOLE_SIZE)
13327  {
13328  size = allocationSize - offset;
13329  }
13330  else
13331  {
13332  VMA_ASSERT(offset + size <= allocationSize);
13333  }
13334  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
13335 
13336  // 2. Adjust to whole block.
13337  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
13338  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
13339  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
13340  memRange.offset += allocationOffset;
13341  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
13342 
13343  break;
13344  }
13345 
13346  default:
13347  VMA_ASSERT(0);
13348  }
13349 
13350  switch(op)
13351  {
13352  case VMA_CACHE_FLUSH:
13353  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
13354  break;
13355  case VMA_CACHE_INVALIDATE:
13356  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
13357  break;
13358  default:
13359  VMA_ASSERT(0);
13360  }
13361  }
13362  // else: Just ignore this call.
13363 }
13364 
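/*
A worked example of the rounding above (illustrative sketch, not part of the library
source), assuming nonCoherentAtomSize = 64, offset = 100, size = 200, for a dedicated
allocation of 1024 bytes:

    memRange.offset = VmaAlignDown(100, 64) = 64
    memRange.size   = min(VmaAlignUp(200 + (100 - 64), 64), 1024 - 64)
                    = min(256, 960) = 256

The flushed range [64, 320) covers the requested [100, 300) and never extends past the
end of the allocation (or, for block allocations, the block).
*/
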
13365 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13366 {
13367  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13368 
13369  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13370  {
13371  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13372  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13373  VMA_ASSERT(pDedicatedAllocations);
13374  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13375  VMA_ASSERT(success);
13376  }
13377 
13378  VkDeviceMemory hMemory = allocation->GetMemory();
13379 
13380  /*
13381  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
13382  before vkFreeMemory.
13383 
13384  if(allocation->GetMappedData() != VMA_NULL)
13385  {
13386  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13387  }
13388  */
13389 
13390  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13391 
13392  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13393 }
13394 
13395 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13396 {
13397  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13398  !hAllocation->CanBecomeLost() &&
13399  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13400  {
13401  void* pData = VMA_NULL;
13402  VkResult res = Map(hAllocation, &pData);
13403  if(res == VK_SUCCESS)
13404  {
13405  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13406  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13407  Unmap(hAllocation);
13408  }
13409  else
13410  {
13411  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13412  }
13413  }
13414 }
13415 
13416 #if VMA_STATS_STRING_ENABLED
13417 
13418 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
13419 {
13420  bool dedicatedAllocationsStarted = false;
13421  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13422  {
13423  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13424  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
13425  VMA_ASSERT(pDedicatedAllocVector);
13426  if(pDedicatedAllocVector->empty() == false)
13427  {
13428  if(dedicatedAllocationsStarted == false)
13429  {
13430  dedicatedAllocationsStarted = true;
13431  json.WriteString("DedicatedAllocations");
13432  json.BeginObject();
13433  }
13434 
13435  json.BeginString("Type ");
13436  json.ContinueString(memTypeIndex);
13437  json.EndString();
13438 
13439  json.BeginArray();
13440 
13441  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
13442  {
13443  json.BeginObject(true);
13444  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
13445  hAlloc->PrintParameters(json);
13446  json.EndObject();
13447  }
13448 
13449  json.EndArray();
13450  }
13451  }
13452  if(dedicatedAllocationsStarted)
13453  {
13454  json.EndObject();
13455  }
13456 
13457  {
13458  bool allocationsStarted = false;
13459  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13460  {
13461  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
13462  {
13463  if(allocationsStarted == false)
13464  {
13465  allocationsStarted = true;
13466  json.WriteString("DefaultPools");
13467  json.BeginObject();
13468  }
13469 
13470  json.BeginString("Type ");
13471  json.ContinueString(memTypeIndex);
13472  json.EndString();
13473 
13474  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
13475  }
13476  }
13477  if(allocationsStarted)
13478  {
13479  json.EndObject();
13480  }
13481  }
13482 
13483  // Custom pools
13484  {
13485  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13486  const size_t poolCount = m_Pools.size();
13487  if(poolCount > 0)
13488  {
13489  json.WriteString("Pools");
13490  json.BeginObject();
13491  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13492  {
13493  json.BeginString();
13494  json.ContinueString(m_Pools[poolIndex]->GetId());
13495  json.EndString();
13496 
13497  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
13498  }
13499  json.EndObject();
13500  }
13501  }
13502 }
13503 
13504 #endif // #if VMA_STATS_STRING_ENABLED
13505 
13506 ////////////////////////////////////////////////////////////////////////////////
13507 // Public interface
13508 
13509 VkResult vmaCreateAllocator(
13510  const VmaAllocatorCreateInfo* pCreateInfo,
13511  VmaAllocator* pAllocator)
13512 {
13513  VMA_ASSERT(pCreateInfo && pAllocator);
13514  VMA_DEBUG_LOG("vmaCreateAllocator");
13515  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13516  return (*pAllocator)->Init(pCreateInfo);
13517 }
13518 
13519 void vmaDestroyAllocator(
13520  VmaAllocator allocator)
13521 {
13522  if(allocator != VK_NULL_HANDLE)
13523  {
13524  VMA_DEBUG_LOG("vmaDestroyAllocator");
13525  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13526  vma_delete(&allocationCallbacks, allocator);
13527  }
13528 }
13529 
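/*
Usage sketch (illustrative, not part of the library source): creating and destroying an
allocator, assuming a valid VkPhysicalDevice `physicalDevice` and VkDevice `device`:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create resources and allocations ...
    vmaDestroyAllocator(allocator); // only after all allocations have been freed
*/
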
13530 void vmaGetPhysicalDeviceProperties(
13531  VmaAllocator allocator,
13532  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13533 {
13534  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13535  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13536 }
13537 
13538 void vmaGetMemoryProperties(
13539  VmaAllocator allocator,
13540  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13541 {
13542  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13543  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13544 }
13545 
13546 void vmaGetMemoryTypeProperties(
13547  VmaAllocator allocator,
13548  uint32_t memoryTypeIndex,
13549  VkMemoryPropertyFlags* pFlags)
13550 {
13551  VMA_ASSERT(allocator && pFlags);
13552  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13553  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13554 }
13555 
13556 void vmaSetCurrentFrameIndex(
13557  VmaAllocator allocator,
13558  uint32_t frameIndex)
13559 {
13560  VMA_ASSERT(allocator);
13561  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13562 
13563  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13564 
13565  allocator->SetCurrentFrameIndex(frameIndex);
13566 }
13567 
13568 void vmaCalculateStats(
13569  VmaAllocator allocator,
13570  VmaStats* pStats)
13571 {
13572  VMA_ASSERT(allocator && pStats);
13573  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13574  allocator->CalculateStats(pStats);
13575 }
13576 
13577 #if VMA_STATS_STRING_ENABLED
13578 
13579 void vmaBuildStatsString(
13580  VmaAllocator allocator,
13581  char** ppStatsString,
13582  VkBool32 detailedMap)
13583 {
13584  VMA_ASSERT(allocator && ppStatsString);
13585  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13586 
13587  VmaStringBuilder sb(allocator);
13588  {
13589  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13590  json.BeginObject();
13591 
13592  VmaStats stats;
13593  allocator->CalculateStats(&stats);
13594 
13595  json.WriteString("Total");
13596  VmaPrintStatInfo(json, stats.total);
13597 
13598  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13599  {
13600  json.BeginString("Heap ");
13601  json.ContinueString(heapIndex);
13602  json.EndString();
13603  json.BeginObject();
13604 
13605  json.WriteString("Size");
13606  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13607 
13608  json.WriteString("Flags");
13609  json.BeginArray(true);
13610  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13611  {
13612  json.WriteString("DEVICE_LOCAL");
13613  }
13614  json.EndArray();
13615 
13616  if(stats.memoryHeap[heapIndex].blockCount > 0)
13617  {
13618  json.WriteString("Stats");
13619  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13620  }
13621 
13622  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13623  {
13624  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13625  {
13626  json.BeginString("Type ");
13627  json.ContinueString(typeIndex);
13628  json.EndString();
13629 
13630  json.BeginObject();
13631 
13632  json.WriteString("Flags");
13633  json.BeginArray(true);
13634  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13635  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13636  {
13637  json.WriteString("DEVICE_LOCAL");
13638  }
13639  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13640  {
13641  json.WriteString("HOST_VISIBLE");
13642  }
13643  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13644  {
13645  json.WriteString("HOST_COHERENT");
13646  }
13647  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13648  {
13649  json.WriteString("HOST_CACHED");
13650  }
13651  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13652  {
13653  json.WriteString("LAZILY_ALLOCATED");
13654  }
13655  json.EndArray();
13656 
13657  if(stats.memoryType[typeIndex].blockCount > 0)
13658  {
13659  json.WriteString("Stats");
13660  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13661  }
13662 
13663  json.EndObject();
13664  }
13665  }
13666 
13667  json.EndObject();
13668  }
13669  if(detailedMap == VK_TRUE)
13670  {
13671  allocator->PrintDetailedMap(json);
13672  }
13673 
13674  json.EndObject();
13675  }
13676 
13677  const size_t len = sb.GetLength();
13678  char* const pChars = vma_new_array(allocator, char, len + 1);
13679  if(len > 0)
13680  {
13681  memcpy(pChars, sb.GetData(), len);
13682  }
13683  pChars[len] = '\0';
13684  *ppStatsString = pChars;
13685 }
13686 
13687 void vmaFreeStatsString(
13688  VmaAllocator allocator,
13689  char* pStatsString)
13690 {
13691  if(pStatsString != VMA_NULL)
13692  {
13693  VMA_ASSERT(allocator);
13694  size_t len = strlen(pStatsString);
13695  vma_delete_array(allocator, pStatsString, len + 1);
13696  }
13697 }
13698 
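/*
Usage sketch (illustrative, not part of the library source): dumping allocator statistics
as a JSON string, assuming a valid `allocator`:

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE adds the detailed map
    // ... log statsString or write it to a file ...
    vmaFreeStatsString(allocator, statsString);
*/
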
13699 #endif // #if VMA_STATS_STRING_ENABLED
13700 
13701 /*
13702 This function is not protected by any mutex because it just reads immutable data.
13703 */
13704 VkResult vmaFindMemoryTypeIndex(
13705  VmaAllocator allocator,
13706  uint32_t memoryTypeBits,
13707  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13708  uint32_t* pMemoryTypeIndex)
13709 {
13710  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13711  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13712  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13713 
13714  if(pAllocationCreateInfo->memoryTypeBits != 0)
13715  {
13716  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13717  }
13718 
13719  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13720  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13721 
13722  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13723  if(mapped)
13724  {
13725  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13726  }
13727 
13728  // Convert usage to requiredFlags and preferredFlags.
13729  switch(pAllocationCreateInfo->usage)
13730  {
13731  case VMA_MEMORY_USAGE_UNKNOWN:
13732  break;
13733  case VMA_MEMORY_USAGE_GPU_ONLY:
13734  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13735  {
13736  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13737  }
13738  break;
13739  case VMA_MEMORY_USAGE_CPU_ONLY:
13740  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13741  break;
13742  case VMA_MEMORY_USAGE_CPU_TO_GPU:
13743  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13744  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13745  {
13746  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13747  }
13748  break;
13749  case VMA_MEMORY_USAGE_GPU_TO_CPU:
13750  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13751  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13752  break;
13753  default:
13754  break;
13755  }
13756 
13757  *pMemoryTypeIndex = UINT32_MAX;
13758  uint32_t minCost = UINT32_MAX;
13759  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13760  memTypeIndex < allocator->GetMemoryTypeCount();
13761  ++memTypeIndex, memTypeBit <<= 1)
13762  {
13763  // This memory type is acceptable according to memoryTypeBits bitmask.
13764  if((memTypeBit & memoryTypeBits) != 0)
13765  {
13766  const VkMemoryPropertyFlags currFlags =
13767  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13768  // This memory type contains requiredFlags.
13769  if((requiredFlags & ~currFlags) == 0)
13770  {
13771  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13772  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13773  // Remember memory type with lowest cost.
13774  if(currCost < minCost)
13775  {
13776  *pMemoryTypeIndex = memTypeIndex;
13777  if(currCost == 0)
13778  {
13779  return VK_SUCCESS;
13780  }
13781  minCost = currCost;
13782  }
13783  }
13784  }
13785  }
13786  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13787 }
13788 
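/*
Usage sketch (illustrative, not part of the library source): finding a memory type for a
device-local resource, assuming a valid `allocator`. UINT32_MAX as memoryTypeBits means
no restriction coming from any particular resource:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/
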
13789 VkResult vmaFindMemoryTypeIndexForBufferInfo(
13790  VmaAllocator allocator,
13791  const VkBufferCreateInfo* pBufferCreateInfo,
13792  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13793  uint32_t* pMemoryTypeIndex)
13794 {
13795  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13796  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13797  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13798  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13799 
13800  const VkDevice hDev = allocator->m_hDevice;
13801  VkBuffer hBuffer = VK_NULL_HANDLE;
13802  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13803  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13804  if(res == VK_SUCCESS)
13805  {
13806  VkMemoryRequirements memReq = {};
13807  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13808  hDev, hBuffer, &memReq);
13809 
13810  res = vmaFindMemoryTypeIndex(
13811  allocator,
13812  memReq.memoryTypeBits,
13813  pAllocationCreateInfo,
13814  pMemoryTypeIndex);
13815 
13816  allocator->GetVulkanFunctions().vkDestroyBuffer(
13817  hDev, hBuffer, allocator->GetAllocationCallbacks());
13818  }
13819  return res;
13820 }
13821 
13822 VkResult vmaFindMemoryTypeIndexForImageInfo(
13823  VmaAllocator allocator,
13824  const VkImageCreateInfo* pImageCreateInfo,
13825  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13826  uint32_t* pMemoryTypeIndex)
13827 {
13828  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13829  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13830  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13831  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13832 
13833  const VkDevice hDev = allocator->m_hDevice;
13834  VkImage hImage = VK_NULL_HANDLE;
13835  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13836  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13837  if(res == VK_SUCCESS)
13838  {
13839  VkMemoryRequirements memReq = {};
13840  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13841  hDev, hImage, &memReq);
13842 
13843  res = vmaFindMemoryTypeIndex(
13844  allocator,
13845  memReq.memoryTypeBits,
13846  pAllocationCreateInfo,
13847  pMemoryTypeIndex);
13848 
13849  allocator->GetVulkanFunctions().vkDestroyImage(
13850  hDev, hImage, allocator->GetAllocationCallbacks());
13851  }
13852  return res;
13853 }
13854 
13855 VkResult vmaCreatePool(
13856  VmaAllocator allocator,
13857  const VmaPoolCreateInfo* pCreateInfo,
13858  VmaPool* pPool)
13859 {
13860  VMA_ASSERT(allocator && pCreateInfo && pPool);
13861 
13862  VMA_DEBUG_LOG("vmaCreatePool");
13863 
13864  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13865 
13866  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13867 
13868 #if VMA_RECORDING_ENABLED
13869  if(allocator->GetRecorder() != VMA_NULL)
13870  {
13871  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13872  }
13873 #endif
13874 
13875  return res;
13876 }
13877 
13878 void vmaDestroyPool(
13879  VmaAllocator allocator,
13880  VmaPool pool)
13881 {
13882  VMA_ASSERT(allocator);
13883 
13884  if(pool == VK_NULL_HANDLE)
13885  {
13886  return;
13887  }
13888 
13889  VMA_DEBUG_LOG("vmaDestroyPool");
13890 
13891  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13892 
13893 #if VMA_RECORDING_ENABLED
13894  if(allocator->GetRecorder() != VMA_NULL)
13895  {
13896  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13897  }
13898 #endif
13899 
13900  allocator->DestroyPool(pool);
13901 }
13902 
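/*
Usage sketch (illustrative, not part of the library source): creating a custom pool,
assuming a valid `allocator` and a `memTypeIndex` found e.g. with vmaFindMemoryTypeIndex():

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per VkDeviceMemory block
    poolCreateInfo.minBlockCount = 1;               // keep at least one block alive

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Allocate from it by setting VmaAllocationCreateInfo::pool = pool.
    // ...
    vmaDestroyPool(allocator, pool); // only after all its allocations have been freed
*/
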
13903 void vmaGetPoolStats(
13904  VmaAllocator allocator,
13905  VmaPool pool,
13906  VmaPoolStats* pPoolStats)
13907 {
13908  VMA_ASSERT(allocator && pool && pPoolStats);
13909 
13910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13911 
13912  allocator->GetPoolStats(pool, pPoolStats);
13913 }
13914 
13915 void vmaMakePoolAllocationsLost(
13916  VmaAllocator allocator,
13917  VmaPool pool,
13918  size_t* pLostAllocationCount)
13919 {
13920  VMA_ASSERT(allocator && pool);
13921 
13922  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13923 
13924 #if VMA_RECORDING_ENABLED
13925  if(allocator->GetRecorder() != VMA_NULL)
13926  {
13927  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13928  }
13929 #endif
13930 
13931  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13932 }
13933 
13934 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13935 {
13936  VMA_ASSERT(allocator && pool);
13937 
13938  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13939 
13940  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13941 
13942  return allocator->CheckPoolCorruption(pool);
13943 }
13944 
13945 VkResult vmaAllocateMemory(
13946  VmaAllocator allocator,
13947  const VkMemoryRequirements* pVkMemoryRequirements,
13948  const VmaAllocationCreateInfo* pCreateInfo,
13949  VmaAllocation* pAllocation,
13950  VmaAllocationInfo* pAllocationInfo)
13951 {
13952  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13953 
13954  VMA_DEBUG_LOG("vmaAllocateMemory");
13955 
13956  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13957 
13958  VkResult result = allocator->AllocateMemory(
13959  *pVkMemoryRequirements,
13960  false, // requiresDedicatedAllocation
13961  false, // prefersDedicatedAllocation
13962  VK_NULL_HANDLE, // dedicatedBuffer
13963  VK_NULL_HANDLE, // dedicatedImage
13964  *pCreateInfo,
13965  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13966  pAllocation);
13967 
13968 #if VMA_RECORDING_ENABLED
13969  if(allocator->GetRecorder() != VMA_NULL)
13970  {
13971  allocator->GetRecorder()->RecordAllocateMemory(
13972  allocator->GetCurrentFrameIndex(),
13973  *pVkMemoryRequirements,
13974  *pCreateInfo,
13975  *pAllocation);
13976  }
13977 #endif
13978 
13979  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13980  {
13981  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13982  }
13983 
13984  return result;
13985 }
13986 
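/*
Usage sketch (illustrative, not part of the library source): allocating raw memory from
requirements queried elsewhere, assuming a valid `allocator` and a VkMemoryRequirements
`memReq` filled e.g. by vkGetBufferMemoryRequirements:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, &allocInfo);
    // allocInfo.deviceMemory and allocInfo.offset identify the chosen range.
    // ...
    vmaFreeMemory(allocator, alloc);
*/
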
13987 VkResult vmaAllocateMemoryForBuffer(
13988  VmaAllocator allocator,
13989  VkBuffer buffer,
13990  const VmaAllocationCreateInfo* pCreateInfo,
13991  VmaAllocation* pAllocation,
13992  VmaAllocationInfo* pAllocationInfo)
13993 {
13994  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13995 
13996  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
13997 
13998  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13999 
14000  VkMemoryRequirements vkMemReq = {};
14001  bool requiresDedicatedAllocation = false;
14002  bool prefersDedicatedAllocation = false;
14003  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
14004  requiresDedicatedAllocation,
14005  prefersDedicatedAllocation);
14006 
14007  VkResult result = allocator->AllocateMemory(
14008  vkMemReq,
14009  requiresDedicatedAllocation,
14010  prefersDedicatedAllocation,
14011  buffer, // dedicatedBuffer
14012  VK_NULL_HANDLE, // dedicatedImage
14013  *pCreateInfo,
14014  VMA_SUBALLOCATION_TYPE_BUFFER,
14015  pAllocation);
14016 
14017 #if VMA_RECORDING_ENABLED
14018  if(allocator->GetRecorder() != VMA_NULL)
14019  {
14020  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
14021  allocator->GetCurrentFrameIndex(),
14022  vkMemReq,
14023  requiresDedicatedAllocation,
14024  prefersDedicatedAllocation,
14025  *pCreateInfo,
14026  *pAllocation);
14027  }
14028 #endif
14029 
14030  if(pAllocationInfo && result == VK_SUCCESS)
14031  {
14032  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14033  }
14034 
14035  return result;
14036 }
14037 
14038 VkResult vmaAllocateMemoryForImage(
14039  VmaAllocator allocator,
14040  VkImage image,
14041  const VmaAllocationCreateInfo* pCreateInfo,
14042  VmaAllocation* pAllocation,
14043  VmaAllocationInfo* pAllocationInfo)
14044 {
14045  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
14046 
14047  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
14048 
14049  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14050 
14051  VkMemoryRequirements vkMemReq = {};
14052  bool requiresDedicatedAllocation = false;
14053  bool prefersDedicatedAllocation = false;
14054  allocator->GetImageMemoryRequirements(image, vkMemReq,
14055  requiresDedicatedAllocation, prefersDedicatedAllocation);
14056 
14057  VkResult result = allocator->AllocateMemory(
14058  vkMemReq,
14059  requiresDedicatedAllocation,
14060  prefersDedicatedAllocation,
14061  VK_NULL_HANDLE, // dedicatedBuffer
14062  image, // dedicatedImage
14063  *pCreateInfo,
14064  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
14065  pAllocation);
14066 
14067 #if VMA_RECORDING_ENABLED
14068  if(allocator->GetRecorder() != VMA_NULL)
14069  {
14070  allocator->GetRecorder()->RecordAllocateMemoryForImage(
14071  allocator->GetCurrentFrameIndex(),
14072  vkMemReq,
14073  requiresDedicatedAllocation,
14074  prefersDedicatedAllocation,
14075  *pCreateInfo,
14076  *pAllocation);
14077  }
14078 #endif
14079 
14080  if(pAllocationInfo && result == VK_SUCCESS)
14081  {
14082  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14083  }
14084 
14085  return result;
14086 }
14087 
14088 void vmaFreeMemory(
14089  VmaAllocator allocator,
14090  VmaAllocation allocation)
14091 {
14092  VMA_ASSERT(allocator);
14093 
14094  if(allocation == VK_NULL_HANDLE)
14095  {
14096  return;
14097  }
14098 
14099  VMA_DEBUG_LOG("vmaFreeMemory");
14100 
14101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14102 
14103 #if VMA_RECORDING_ENABLED
14104  if(allocator->GetRecorder() != VMA_NULL)
14105  {
14106  allocator->GetRecorder()->RecordFreeMemory(
14107  allocator->GetCurrentFrameIndex(),
14108  allocation);
14109  }
14110 #endif
14111 
14112  allocator->FreeMemory(allocation);
14113 }
14114 
14115 VkResult vmaResizeAllocation(
14116  VmaAllocator allocator,
14117  VmaAllocation allocation,
14118  VkDeviceSize newSize)
14119 {
14120  VMA_ASSERT(allocator && allocation);
14121 
14122  VMA_DEBUG_LOG("vmaResizeAllocation");
14123 
14124  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14125 
14126 #if VMA_RECORDING_ENABLED
14127  if(allocator->GetRecorder() != VMA_NULL)
14128  {
14129  allocator->GetRecorder()->RecordResizeAllocation(
14130  allocator->GetCurrentFrameIndex(),
14131  allocation,
14132  newSize);
14133  }
14134 #endif
14135 
14136  return allocator->ResizeAllocation(allocation, newSize);
14137 }
14138 
14139 void vmaGetAllocationInfo(
14140  VmaAllocator allocator,
14141  VmaAllocation allocation,
14142  VmaAllocationInfo* pAllocationInfo)
14143 {
14144  VMA_ASSERT(allocator && allocation && pAllocationInfo);
14145 
14146  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14147 
14148 #if VMA_RECORDING_ENABLED
14149  if(allocator->GetRecorder() != VMA_NULL)
14150  {
14151  allocator->GetRecorder()->RecordGetAllocationInfo(
14152  allocator->GetCurrentFrameIndex(),
14153  allocation);
14154  }
14155 #endif
14156 
14157  allocator->GetAllocationInfo(allocation, pAllocationInfo);
14158 }
14159 
14160 VkBool32 vmaTouchAllocation(
14161  VmaAllocator allocator,
14162  VmaAllocation allocation)
14163 {
14164  VMA_ASSERT(allocator && allocation);
14165 
14166  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14167 
14168 #if VMA_RECORDING_ENABLED
14169  if(allocator->GetRecorder() != VMA_NULL)
14170  {
14171  allocator->GetRecorder()->RecordTouchAllocation(
14172  allocator->GetCurrentFrameIndex(),
14173  allocation);
14174  }
14175 #endif
14176 
14177  return allocator->TouchAllocation(allocation);
14178 }
14179 
14180 void vmaSetAllocationUserData(
14181  VmaAllocator allocator,
14182  VmaAllocation allocation,
14183  void* pUserData)
14184 {
14185  VMA_ASSERT(allocator && allocation);
14186 
14187  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14188 
14189  allocation->SetUserData(allocator, pUserData);
14190 
14191 #if VMA_RECORDING_ENABLED
14192  if(allocator->GetRecorder() != VMA_NULL)
14193  {
14194  allocator->GetRecorder()->RecordSetAllocationUserData(
14195  allocator->GetCurrentFrameIndex(),
14196  allocation,
14197  pUserData);
14198  }
14199 #endif
14200 }
14201 
14202 void vmaCreateLostAllocation(
14203  VmaAllocator allocator,
14204  VmaAllocation* pAllocation)
14205 {
14206  VMA_ASSERT(allocator && pAllocation);
14207 
14208  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14209 
14210  allocator->CreateLostAllocation(pAllocation);
14211 
14212 #if VMA_RECORDING_ENABLED
14213  if(allocator->GetRecorder() != VMA_NULL)
14214  {
14215  allocator->GetRecorder()->RecordCreateLostAllocation(
14216  allocator->GetCurrentFrameIndex(),
14217  *pAllocation);
14218  }
14219 #endif
14220 }
14221 
14222 VkResult vmaMapMemory(
14223  VmaAllocator allocator,
14224  VmaAllocation allocation,
14225  void** ppData)
14226 {
14227  VMA_ASSERT(allocator && allocation && ppData);
14228 
14229  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14230 
14231  VkResult res = allocator->Map(allocation, ppData);
14232 
14233 #if VMA_RECORDING_ENABLED
14234  if(allocator->GetRecorder() != VMA_NULL)
14235  {
14236  allocator->GetRecorder()->RecordMapMemory(
14237  allocator->GetCurrentFrameIndex(),
14238  allocation);
14239  }
14240 #endif
14241 
14242  return res;
14243 }
14244 
14245 void vmaUnmapMemory(
14246  VmaAllocator allocator,
14247  VmaAllocation allocation)
14248 {
14249  VMA_ASSERT(allocator && allocation);
14250 
14251  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14252 
14253 #if VMA_RECORDING_ENABLED
14254  if(allocator->GetRecorder() != VMA_NULL)
14255  {
14256  allocator->GetRecorder()->RecordUnmapMemory(
14257  allocator->GetCurrentFrameIndex(),
14258  allocation);
14259  }
14260 #endif
14261 
14262  allocator->Unmap(allocation);
14263 }
14264 
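/*
Usage sketch (illustrative, not part of the library source): a short-lived map, assuming
`allocation` lives in HOST_VISIBLE memory and `srcData`/`srcDataSize` are the caller's
CPU-side data:

    void* mappedData = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
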
14265 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14266 {
14267  VMA_ASSERT(allocator && allocation);
14268 
14269  VMA_DEBUG_LOG("vmaFlushAllocation");
14270 
14271  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14272 
14273  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
14274 
14275 #if VMA_RECORDING_ENABLED
14276  if(allocator->GetRecorder() != VMA_NULL)
14277  {
14278  allocator->GetRecorder()->RecordFlushAllocation(
14279  allocator->GetCurrentFrameIndex(),
14280  allocation, offset, size);
14281  }
14282 #endif
14283 }
14284 
14285 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14286 {
14287  VMA_ASSERT(allocator && allocation);
14288 
14289  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14290 
14291  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14292 
14293  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14294 
14295 #if VMA_RECORDING_ENABLED
14296  if(allocator->GetRecorder() != VMA_NULL)
14297  {
14298  allocator->GetRecorder()->RecordInvalidateAllocation(
14299  allocator->GetCurrentFrameIndex(),
14300  allocation, offset, size);
14301  }
14302 #endif
14303 }
14304 
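/*
Usage sketch (illustrative, not part of the library source): these calls matter only for
memory types without HOST_COHERENT; on coherent types they reduce to no-ops, as the
implementation above shows. Assuming a valid `allocator` and a mapped `allocation`:

    // After the CPU writes through the mapped pointer, before the GPU reads:
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);

    // Before the CPU reads data that the GPU has written:
    vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
*/
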
14305 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14306 {
14307  VMA_ASSERT(allocator);
14308 
14309  VMA_DEBUG_LOG("vmaCheckCorruption");
14310 
14311  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14312 
14313  return allocator->CheckCorruption(memoryTypeBits);
14314 }
14315 
14316 VkResult vmaDefragment(
14317  VmaAllocator allocator,
14318  VmaAllocation* pAllocations,
14319  size_t allocationCount,
14320  VkBool32* pAllocationsChanged,
14321  const VmaDefragmentationInfo *pDefragmentationInfo,
14322  VmaDefragmentationStats* pDefragmentationStats)
14323 {
14324  VMA_ASSERT(allocator && pAllocations);
14325 
14326  VMA_DEBUG_LOG("vmaDefragment");
14327 
14328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14329 
14330  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14331 }
14332 
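/*
Usage sketch (illustrative, not part of the library source): compacting a set of
allocations, assuming a std::vector<VmaAllocation> `allocations` of host-visible,
non-dedicated allocations owned by the caller:

    std::vector<VkBool32> allocationsChanged(allocations.size());
    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(
        allocator,
        allocations.data(), allocations.size(),
        allocationsChanged.data(),
        nullptr, // pDefragmentationInfo: null means default limits
        &defragStats);
    // Wherever allocationsChanged[i] == VK_TRUE, destroy and recreate the buffer/image,
    // then rebind with vmaBindBufferMemory()/vmaBindImageMemory().
*/
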
14333 VkResult vmaBindBufferMemory(
14334  VmaAllocator allocator,
14335  VmaAllocation allocation,
14336  VkBuffer buffer)
14337 {
14338  VMA_ASSERT(allocator && allocation && buffer);
14339 
14340  VMA_DEBUG_LOG("vmaBindBufferMemory");
14341 
14342  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14343 
14344  return allocator->BindBufferMemory(allocation, buffer);
14345 }
14346 
14347 VkResult vmaBindImageMemory(
14348  VmaAllocator allocator,
14349  VmaAllocation allocation,
14350  VkImage image)
14351 {
14352  VMA_ASSERT(allocator && allocation && image);
14353 
14354  VMA_DEBUG_LOG("vmaBindImageMemory");
14355 
14356  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14357 
14358  return allocator->BindImageMemory(allocation, image);
14359 }
14360 
14361 VkResult vmaCreateBuffer(
14362  VmaAllocator allocator,
14363  const VkBufferCreateInfo* pBufferCreateInfo,
14364  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14365  VkBuffer* pBuffer,
14366  VmaAllocation* pAllocation,
14367  VmaAllocationInfo* pAllocationInfo)
14368 {
14369  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14370 
14371  if(pBufferCreateInfo->size == 0)
14372  {
14373  return VK_ERROR_VALIDATION_FAILED_EXT;
14374  }
14375 
14376  VMA_DEBUG_LOG("vmaCreateBuffer");
14377 
14378  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14379 
14380  *pBuffer = VK_NULL_HANDLE;
14381  *pAllocation = VK_NULL_HANDLE;
14382 
14383  // 1. Create VkBuffer.
14384  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14385  allocator->m_hDevice,
14386  pBufferCreateInfo,
14387  allocator->GetAllocationCallbacks(),
14388  pBuffer);
14389  if(res >= 0)
14390  {
14391  // 2. vkGetBufferMemoryRequirements.
14392  VkMemoryRequirements vkMemReq = {};
14393  bool requiresDedicatedAllocation = false;
14394  bool prefersDedicatedAllocation = false;
14395  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14396  requiresDedicatedAllocation, prefersDedicatedAllocation);
14397 
14398  // Make sure alignment requirements for specific buffer usages reported
14399  // in Physical Device Properties are included in alignment reported by memory requirements.
14400  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14401  {
14402  VMA_ASSERT(vkMemReq.alignment %
14403  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14404  }
14405  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14406  {
14407  VMA_ASSERT(vkMemReq.alignment %
14408  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14409  }
14410  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14411  {
14412  VMA_ASSERT(vkMemReq.alignment %
14413  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14414  }
14415 
14416  // 3. Allocate memory using allocator.
14417  res = allocator->AllocateMemory(
14418  vkMemReq,
14419  requiresDedicatedAllocation,
14420  prefersDedicatedAllocation,
14421  *pBuffer, // dedicatedBuffer
14422  VK_NULL_HANDLE, // dedicatedImage
14423  *pAllocationCreateInfo,
14424  VMA_SUBALLOCATION_TYPE_BUFFER,
14425  pAllocation);
14426 
14427 #if VMA_RECORDING_ENABLED
14428  if(allocator->GetRecorder() != VMA_NULL)
14429  {
14430  allocator->GetRecorder()->RecordCreateBuffer(
14431  allocator->GetCurrentFrameIndex(),
14432  *pBufferCreateInfo,
14433  *pAllocationCreateInfo,
14434  *pAllocation);
14435  }
14436 #endif
14437 
14438  if(res >= 0)
14439  {
14440  // 4. Bind buffer with memory.
14441  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14442  if(res >= 0)
14443  {
14444  // All steps succeeded.
14445  #if VMA_STATS_STRING_ENABLED
14446  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14447  #endif
14448  if(pAllocationInfo != VMA_NULL)
14449  {
14450  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14451  }
14452 
14453  return VK_SUCCESS;
14454  }
14455  allocator->FreeMemory(*pAllocation);
14456  *pAllocation = VK_NULL_HANDLE;
14457  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14458  *pBuffer = VK_NULL_HANDLE;
14459  return res;
14460  }
14461  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14462  *pBuffer = VK_NULL_HANDLE;
14463  return res;
14464  }
14465  return res;
14466 }
14467 
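/*
Usage sketch (illustrative, not part of the library source): a device-local vertex
buffer, assuming a valid `allocator` and a caller-defined `vertexDataSize`:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = vertexDataSize;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
    // ... fill via a staging copy, use for rendering ...
    vmaDestroyBuffer(allocator, buf, alloc);
*/
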
14468 void vmaDestroyBuffer(
14469  VmaAllocator allocator,
14470  VkBuffer buffer,
14471  VmaAllocation allocation)
14472 {
14473  VMA_ASSERT(allocator);
14474 
14475  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14476  {
14477  return;
14478  }
14479 
14480  VMA_DEBUG_LOG("vmaDestroyBuffer");
14481 
14482  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14483 
14484 #if VMA_RECORDING_ENABLED
14485  if(allocator->GetRecorder() != VMA_NULL)
14486  {
14487  allocator->GetRecorder()->RecordDestroyBuffer(
14488  allocator->GetCurrentFrameIndex(),
14489  allocation);
14490  }
14491 #endif
14492 
14493  if(buffer != VK_NULL_HANDLE)
14494  {
14495  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14496  }
14497 
14498  if(allocation != VK_NULL_HANDLE)
14499  {
14500  allocator->FreeMemory(allocation);
14501  }
14502 }
14503 
14504 VkResult vmaCreateImage(
14505  VmaAllocator allocator,
14506  const VkImageCreateInfo* pImageCreateInfo,
14507  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14508  VkImage* pImage,
14509  VmaAllocation* pAllocation,
14510  VmaAllocationInfo* pAllocationInfo)
14511 {
14512  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14513 
14514  if(pImageCreateInfo->extent.width == 0 ||
14515  pImageCreateInfo->extent.height == 0 ||
14516  pImageCreateInfo->extent.depth == 0 ||
14517  pImageCreateInfo->mipLevels == 0 ||
14518  pImageCreateInfo->arrayLayers == 0)
14519  {
14520  return VK_ERROR_VALIDATION_FAILED_EXT;
14521  }
14522 
14523  VMA_DEBUG_LOG("vmaCreateImage");
14524 
14525  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14526 
14527  *pImage = VK_NULL_HANDLE;
14528  *pAllocation = VK_NULL_HANDLE;
14529 
14530  // 1. Create VkImage.
14531  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14532  allocator->m_hDevice,
14533  pImageCreateInfo,
14534  allocator->GetAllocationCallbacks(),
14535  pImage);
14536  if(res >= 0)
14537  {
14538  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14539  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14540  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14541 
14542  // 2. Allocate memory using allocator.
14543  VkMemoryRequirements vkMemReq = {};
14544  bool requiresDedicatedAllocation = false;
14545  bool prefersDedicatedAllocation = false;
14546  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14547  requiresDedicatedAllocation, prefersDedicatedAllocation);
14548 
14549  res = allocator->AllocateMemory(
14550  vkMemReq,
14551  requiresDedicatedAllocation,
14552  prefersDedicatedAllocation,
14553  VK_NULL_HANDLE, // dedicatedBuffer
14554  *pImage, // dedicatedImage
14555  *pAllocationCreateInfo,
14556  suballocType,
14557  pAllocation);
14558 
14559 #if VMA_RECORDING_ENABLED
14560  if(allocator->GetRecorder() != VMA_NULL)
14561  {
14562  allocator->GetRecorder()->RecordCreateImage(
14563  allocator->GetCurrentFrameIndex(),
14564  *pImageCreateInfo,
14565  *pAllocationCreateInfo,
14566  *pAllocation);
14567  }
14568 #endif
14569 
14570  if(res >= 0)
14571  {
14572  // 3. Bind image with memory.
14573  res = allocator->BindImageMemory(*pAllocation, *pImage);
14574  if(res >= 0)
14575  {
14576  // All steps succeeded.
14577  #if VMA_STATS_STRING_ENABLED
14578  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14579  #endif
14580  if(pAllocationInfo != VMA_NULL)
14581  {
14582  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14583  }
14584 
14585  return VK_SUCCESS;
14586  }
14587  allocator->FreeMemory(*pAllocation);
14588  *pAllocation = VK_NULL_HANDLE;
14589  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14590  *pImage = VK_NULL_HANDLE;
14591  return res;
14592  }
14593  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14594  *pImage = VK_NULL_HANDLE;
14595  return res;
14596  }
14597  return res;
14598 }
14599 
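/*
Usage sketch (illustrative, not part of the library source): a sampled 2D texture in
device-local memory, assuming a valid `allocator`:

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage img;
    VmaAllocation alloc;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
    // ...
    vmaDestroyImage(allocator, img, alloc);
*/
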
14600 void vmaDestroyImage(
14601  VmaAllocator allocator,
14602  VkImage image,
14603  VmaAllocation allocation)
14604 {
14605  VMA_ASSERT(allocator);
14606 
14607  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14608  {
14609  return;
14610  }
14611 
14612  VMA_DEBUG_LOG("vmaDestroyImage");
14613 
14614  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14615 
14616 #if VMA_RECORDING_ENABLED
14617  if(allocator->GetRecorder() != VMA_NULL)
14618  {
14619  allocator->GetRecorder()->RecordDestroyImage(
14620  allocator->GetCurrentFrameIndex(),
14621  allocation);
14622  }
14623 #endif
14624 
14625  if(image != VK_NULL_HANDLE)
14626  {
14627  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14628  }
14629  if(allocation != VK_NULL_HANDLE)
14630  {
14631  allocator->FreeMemory(allocation);
14632  }
14633 }
14634 
14635 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1586
+
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1887
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
-
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1641
+
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1643
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
-
Definition: vk_mem_alloc.h:1615
-
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2207
-
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1596
+
Definition: vk_mem_alloc.h:1617
+
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2209
+
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1598
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
-
Definition: vk_mem_alloc.h:1842
-
Definition: vk_mem_alloc.h:1945
-
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1588
-
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2307
-
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1638
-
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2577
-
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2096
-
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1485
+
Definition: vk_mem_alloc.h:1844
+
Definition: vk_mem_alloc.h:1947
+
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1590
+
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2309
+
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1640
+
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2579
+
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2098
+
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1487
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
-
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2188
-
Definition: vk_mem_alloc.h:1922
-
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1577
-
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1995
-
Definition: vk_mem_alloc.h:1869
-
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1650
-
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2124
+
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2190
+
Definition: vk_mem_alloc.h:1924
+
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1579
+
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1997
+
Definition: vk_mem_alloc.h:1871
+
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1652
+
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2126
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
-
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1703
-
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1635
+
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1705
+
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1637
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
-
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1873
+
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1875
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
-
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1775
-
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1593
-
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1774
-
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2581
+
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1777
+
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1595
+
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1776
+
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2583
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
-
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1667
-
VmaStatInfo total
Definition: vk_mem_alloc.h:1784
-
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2589
-
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1979
-
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2572
-
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1594
-
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1519
+
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1669
+
VmaStatInfo total
Definition: vk_mem_alloc.h:1786
+
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2591
+
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1981
+
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2574
+
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1596
+
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1521
Represents main object of this library initialized.
-
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1644
+
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1646
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
-
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2138
-
Definition: vk_mem_alloc.h:2132
-
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1710
-
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2317
+
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2140
+
Definition: vk_mem_alloc.h:2134
+
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1712
+
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2319
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
-
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1589
-
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1613
-
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2016
-
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2158
-
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2194
+
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1591
+
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1615
+
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2018
+
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2160
+
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2196
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
-
Definition: vk_mem_alloc.h:1575
-
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2141
+
Definition: vk_mem_alloc.h:1577
+
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2143
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
-
VmaMemoryUsage
Definition: vk_mem_alloc.h:1820
+
VmaMemoryUsage
Definition: vk_mem_alloc.h:1822
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
-
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2567
+
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2569
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
-
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2585
-
Definition: vk_mem_alloc.h:1859
-
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2003
-
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1592
+
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2587
+
Definition: vk_mem_alloc.h:1861
+
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2005
+
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1594
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
-
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1780
-
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1525
+
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1782
+
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1527
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
- +
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
-
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1546
+
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1548
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
-
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1617
-
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1551
-
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2587
+
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1619
+
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1553
+
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2589
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
-
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1990
-
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2204
+
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1992
+
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2206
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1587
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1765
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2155
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1540
Definition: vk_mem_alloc.h:2130
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
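As a sketch, creating a device-local vertex buffer together with its memory in one call (size and usage flags are example values):
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, NULL);
// ... use the buffer ...
vmaDestroyBuffer(allocator, buf, alloc);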
Definition: vk_mem_alloc.h:1931
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1778
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1544
Definition: vk_mem_alloc.h:1955
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2146
Definition: vk_mem_alloc.h:1870
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1593
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
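A sketch analogous to the buffer case - a sampled 2D texture in device-local memory (format and extent are example values):
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.extent.width = 1024;
imgCreateInfo.extent.height = 1024;
imgCreateInfo.extent.depth = 1;
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage img;
VmaAllocation alloc;
vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, NULL);
// Freed later with vmaDestroyImage(allocator, img, alloc);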
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1987
Definition: vk_mem_alloc.h:1978
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1768
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1589
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2168
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1655
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2199
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1976
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2011
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1693
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1784
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1911
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1777
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
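A sketch: the buffer described here is never actually created, only its requirements are inspected; the resulting index can then seed e.g. VmaPoolCreateInfo::memoryTypeIndex:
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536; // example size
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
    allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);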
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1600
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1625
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1542
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1599
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
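Each vmaMapMemory() call must be balanced by vmaUnmapMemory(). A short sketch, assuming the allocation lives in host-visible memory and myData is some host-side struct:
void* mappedData;
if (vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, &myData, sizeof(myData));
    vmaUnmapMemory(allocator, allocation);
}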
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2182
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1592
Definition: vk_mem_alloc.h:1942
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1633
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2333
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1649
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1777
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1774
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
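A hedged sketch - the check is only meaningful when margins and corruption detection were compiled in (VMA_DEBUG_MARGIN, VMA_DEBUG_DETECT_CORRUPTION); otherwise the feature is reported as not present:
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // UINT32_MAX = all memory types
if (res == VK_ERROR_FEATURE_NOT_PRESENT)
{
    // Corruption detection is not enabled for any of the requested memory types.
}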
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:2187
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
Definition: vk_mem_alloc.h:1951
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2314
Definition: vk_mem_alloc.h:1962
Definition: vk_mem_alloc.h:1974
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2585
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1585
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
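A minimal sketch, assuming the VkPhysicalDevice and VkDevice were created beforehand; the remaining members can stay zero-initialized for defaults:
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// Destroyed at shutdown with vmaDestroyAllocator(allocator);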
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1772
Definition: vk_mem_alloc.h:1827
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2136
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1622
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1770
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1597
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1601
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1898
Definition: vk_mem_alloc.h:1969
Definition: vk_mem_alloc.h:1854
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2328
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1575
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1588
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2115
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Tries to resize an allocation in place, if there is enough free memory after it.
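A hedged sketch - the call either succeeds in place or fails without side effects, in which case the application falls back to a fresh allocation:
VkDeviceSize newSize = 2 * 65536; // example target size
if (vmaResizeAllocation(allocator, allocation, newSize) != VK_SUCCESS)
{
    // Not enough free space directly after the allocation:
    // allocate a new block and copy the contents instead.
}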
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2295
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
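A sketch of the low-level path, where the application queries requirements itself and binds manually (buf is assumed to exist already):
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buf, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc;
vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, NULL);
vmaBindBufferMemory(allocator, alloc, buf);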
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1959
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2080
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1778
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
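A sketch for allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, touched once per frame before use:
if (vmaTouchAllocation(allocator, allocation) == VK_TRUE)
{
    // Allocation is valid and now marked as used in the current frame.
}
else
{
    // Allocation became lost - recreate the resource before using it.
}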
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1609
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1785
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2193
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1778
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2300