diff --git a/bin/VmaReplay_Release_vs2015.exe b/bin/VmaReplay_Release_vs2015.exe index 1e08a58..6f29d4b 100644 Binary files a/bin/VmaReplay_Release_vs2015.exe and b/bin/VmaReplay_Release_vs2015.exe differ diff --git a/bin/VulkanSample_Release_vs2015.exe b/bin/VulkanSample_Release_vs2015.exe index 3958098..4686fdd 100644 Binary files a/bin/VulkanSample_Release_vs2015.exe and b/bin/VulkanSample_Release_vs2015.exe differ diff --git a/docs/html/custom_memory_pools.html b/docs/html/custom_memory_pools.html index 9f72b3f..8b7e428 100644 --- a/docs/html/custom_memory_pools.html +++ b/docs/html/custom_memory_pools.html @@ -132,7 +132,7 @@ Ring buffer
Ring buffer
-

Pools with linear algorithm support lost allocations when used as ring buffer. If there is not enough free space for a new allocation, but existing allocations from the front of the queue can become lost, they become lost and the allocation succeeds.

+

Pools with linear algorithm support lost allocations when used as ring buffer. If there is not enough free space for a new allocation, but existing allocations from the front of the queue can become lost, they become lost and the allocation succeeds.

Ring buffer with lost allocations
diff --git a/docs/html/index.html b/docs/html/index.html index e231206..336fb86 100644 --- a/docs/html/index.html +++ b/docs/html/index.html @@ -62,7 +62,7 @@ $(function() {
Vulkan Memory Allocator
-

Version 2.1.0-alpha.4 (2018-08-22)

+

Version 2.1.0-beta.1 (2018-08-24)

Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
License: MIT

Documentation of all members: vk_mem_alloc.h

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 4cf5c94..88e2ea2 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -62,7 +62,7 @@ $(function() {
vk_mem_alloc.h
-Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1342 #include <vulkan/vulkan.h>
1343 
1344 #if !defined(VMA_DEDICATED_ALLOCATION)
1345  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1346  #define VMA_DEDICATED_ALLOCATION 1
1347  #else
1348  #define VMA_DEDICATED_ALLOCATION 0
1349  #endif
1350 #endif
1351 
1361 VK_DEFINE_HANDLE(VmaAllocator)
1362 
1363 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1365  VmaAllocator allocator,
1366  uint32_t memoryType,
1367  VkDeviceMemory memory,
1368  VkDeviceSize size);
1370 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1371  VmaAllocator allocator,
1372  uint32_t memoryType,
1373  VkDeviceMemory memory,
1374  VkDeviceSize size);
1375 
1389 
1419 
1422 typedef VkFlags VmaAllocatorCreateFlags;
1423 
1428 typedef struct VmaVulkanFunctions {
1429  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1430  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1431  PFN_vkAllocateMemory vkAllocateMemory;
1432  PFN_vkFreeMemory vkFreeMemory;
1433  PFN_vkMapMemory vkMapMemory;
1434  PFN_vkUnmapMemory vkUnmapMemory;
1435  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1436  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1437  PFN_vkBindBufferMemory vkBindBufferMemory;
1438  PFN_vkBindImageMemory vkBindImageMemory;
1439  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1440  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1441  PFN_vkCreateBuffer vkCreateBuffer;
1442  PFN_vkDestroyBuffer vkDestroyBuffer;
1443  PFN_vkCreateImage vkCreateImage;
1444  PFN_vkDestroyImage vkDestroyImage;
1445 #if VMA_DEDICATED_ALLOCATION
1446  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1447  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1448 #endif
1450 
1452 typedef enum VmaRecordFlagBits {
1459 
1462 typedef VkFlags VmaRecordFlags;
1463 
1464 /*
1465 Define this macro to 0/1 to disable/enable support for recording functionality,
1466 available through VmaAllocatorCreateInfo::pRecordSettings.
1467 */
1468 #ifndef VMA_RECORDING_ENABLED
1469  #ifdef _WIN32
1470  #define VMA_RECORDING_ENABLED 1
1471  #else
1472  #define VMA_RECORDING_ENABLED 0
1473  #endif
1474 #endif
1475 
1477 typedef struct VmaRecordSettings
1478 {
1480  VmaRecordFlags flags;
1488  const char* pFilePath;
1490 
1493 {
1495  VmaAllocatorCreateFlags flags;
1497 
1498  VkPhysicalDevice physicalDevice;
1500 
1501  VkDevice device;
1503 
1506 
1507  const VkAllocationCallbacks* pAllocationCallbacks;
1509 
1548  const VkDeviceSize* pHeapSizeLimit;
1569 
1571 VkResult vmaCreateAllocator(
1572  const VmaAllocatorCreateInfo* pCreateInfo,
1573  VmaAllocator* pAllocator);
1574 
1576 void vmaDestroyAllocator(
1577  VmaAllocator allocator);
1578 
1584  VmaAllocator allocator,
1585  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1586 
1592  VmaAllocator allocator,
1593  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1594 
1602  VmaAllocator allocator,
1603  uint32_t memoryTypeIndex,
1604  VkMemoryPropertyFlags* pFlags);
1605 
1615  VmaAllocator allocator,
1616  uint32_t frameIndex);
1617 
1620 typedef struct VmaStatInfo
1621 {
1623  uint32_t blockCount;
1629  VkDeviceSize usedBytes;
1631  VkDeviceSize unusedBytes;
1632  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1633  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1634 } VmaStatInfo;
1635 
1637 typedef struct VmaStats
1638 {
1639  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1640  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1642 } VmaStats;
1643 
1645 void vmaCalculateStats(
1646  VmaAllocator allocator,
1647  VmaStats* pStats);
1648 
1649 #define VMA_STATS_STRING_ENABLED 1
1650 
1651 #if VMA_STATS_STRING_ENABLED
1652 
1654 
1656 void vmaBuildStatsString(
1657  VmaAllocator allocator,
1658  char** ppStatsString,
1659  VkBool32 detailedMap);
1660 
1661 void vmaFreeStatsString(
1662  VmaAllocator allocator,
1663  char* pStatsString);
1664 
1665 #endif // #if VMA_STATS_STRING_ENABLED
1666 
1675 VK_DEFINE_HANDLE(VmaPool)
1676 
1677 typedef enum VmaMemoryUsage
1678 {
1727 } VmaMemoryUsage;
1728 
1743 
1798 
1802 
1804 {
1806  VmaAllocationCreateFlags flags;
1817  VkMemoryPropertyFlags requiredFlags;
1822  VkMemoryPropertyFlags preferredFlags;
1830  uint32_t memoryTypeBits;
1843  void* pUserData;
1845 
1862 VkResult vmaFindMemoryTypeIndex(
1863  VmaAllocator allocator,
1864  uint32_t memoryTypeBits,
1865  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1866  uint32_t* pMemoryTypeIndex);
1867 
1881  VmaAllocator allocator,
1882  const VkBufferCreateInfo* pBufferCreateInfo,
1883  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1884  uint32_t* pMemoryTypeIndex);
1885 
1899  VmaAllocator allocator,
1900  const VkImageCreateInfo* pImageCreateInfo,
1901  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1902  uint32_t* pMemoryTypeIndex);
1903 
1924 
1939 
1942 typedef VkFlags VmaPoolCreateFlags;
1943 
1946 typedef struct VmaPoolCreateInfo {
1952  VmaPoolCreateFlags flags;
1957  VkDeviceSize blockSize;
1987 
1990 typedef struct VmaPoolStats {
1993  VkDeviceSize size;
1996  VkDeviceSize unusedSize;
2009  VkDeviceSize unusedRangeSizeMax;
2010 } VmaPoolStats;
2011 
2018 VkResult vmaCreatePool(
2019  VmaAllocator allocator,
2020  const VmaPoolCreateInfo* pCreateInfo,
2021  VmaPool* pPool);
2022 
2025 void vmaDestroyPool(
2026  VmaAllocator allocator,
2027  VmaPool pool);
2028 
2035 void vmaGetPoolStats(
2036  VmaAllocator allocator,
2037  VmaPool pool,
2038  VmaPoolStats* pPoolStats);
2039 
2047  VmaAllocator allocator,
2048  VmaPool pool,
2049  size_t* pLostAllocationCount);
2050 
2065 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2066 
2091 VK_DEFINE_HANDLE(VmaAllocation)
2092 
2093 
2095 typedef struct VmaAllocationInfo {
2100  uint32_t memoryType;
2109  VkDeviceMemory deviceMemory;
2114  VkDeviceSize offset;
2119  VkDeviceSize size;
2133  void* pUserData;
2135 
2146 VkResult vmaAllocateMemory(
2147  VmaAllocator allocator,
2148  const VkMemoryRequirements* pVkMemoryRequirements,
2149  const VmaAllocationCreateInfo* pCreateInfo,
2150  VmaAllocation* pAllocation,
2151  VmaAllocationInfo* pAllocationInfo);
2152 
2160  VmaAllocator allocator,
2161  VkBuffer buffer,
2162  const VmaAllocationCreateInfo* pCreateInfo,
2163  VmaAllocation* pAllocation,
2164  VmaAllocationInfo* pAllocationInfo);
2165 
2167 VkResult vmaAllocateMemoryForImage(
2168  VmaAllocator allocator,
2169  VkImage image,
2170  const VmaAllocationCreateInfo* pCreateInfo,
2171  VmaAllocation* pAllocation,
2172  VmaAllocationInfo* pAllocationInfo);
2173 
2175 void vmaFreeMemory(
2176  VmaAllocator allocator,
2177  VmaAllocation allocation);
2178 
2196  VmaAllocator allocator,
2197  VmaAllocation allocation,
2198  VmaAllocationInfo* pAllocationInfo);
2199 
2214 VkBool32 vmaTouchAllocation(
2215  VmaAllocator allocator,
2216  VmaAllocation allocation);
2217 
2232  VmaAllocator allocator,
2233  VmaAllocation allocation,
2234  void* pUserData);
2235 
2247  VmaAllocator allocator,
2248  VmaAllocation* pAllocation);
2249 
2284 VkResult vmaMapMemory(
2285  VmaAllocator allocator,
2286  VmaAllocation allocation,
2287  void** ppData);
2288 
2293 void vmaUnmapMemory(
2294  VmaAllocator allocator,
2295  VmaAllocation allocation);
2296 
2309 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2310 
2323 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2324 
2341 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2342 
2344 typedef struct VmaDefragmentationInfo {
2349  VkDeviceSize maxBytesToMove;
2356 
2358 typedef struct VmaDefragmentationStats {
2360  VkDeviceSize bytesMoved;
2362  VkDeviceSize bytesFreed;
2368 
2455 VkResult vmaDefragment(
2456  VmaAllocator allocator,
2457  VmaAllocation* pAllocations,
2458  size_t allocationCount,
2459  VkBool32* pAllocationsChanged,
2460  const VmaDefragmentationInfo *pDefragmentationInfo,
2461  VmaDefragmentationStats* pDefragmentationStats);
2462 
2475 VkResult vmaBindBufferMemory(
2476  VmaAllocator allocator,
2477  VmaAllocation allocation,
2478  VkBuffer buffer);
2479 
2492 VkResult vmaBindImageMemory(
2493  VmaAllocator allocator,
2494  VmaAllocation allocation,
2495  VkImage image);
2496 
2523 VkResult vmaCreateBuffer(
2524  VmaAllocator allocator,
2525  const VkBufferCreateInfo* pBufferCreateInfo,
2526  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2527  VkBuffer* pBuffer,
2528  VmaAllocation* pAllocation,
2529  VmaAllocationInfo* pAllocationInfo);
2530 
2542 void vmaDestroyBuffer(
2543  VmaAllocator allocator,
2544  VkBuffer buffer,
2545  VmaAllocation allocation);
2546 
2548 VkResult vmaCreateImage(
2549  VmaAllocator allocator,
2550  const VkImageCreateInfo* pImageCreateInfo,
2551  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2552  VkImage* pImage,
2553  VmaAllocation* pAllocation,
2554  VmaAllocationInfo* pAllocationInfo);
2555 
2567 void vmaDestroyImage(
2568  VmaAllocator allocator,
2569  VkImage image,
2570  VmaAllocation allocation);
2571 
2572 #ifdef __cplusplus
2573 }
2574 #endif
2575 
2576 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2577 
2578 // For Visual Studio IntelliSense.
2579 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2580 #define VMA_IMPLEMENTATION
2581 #endif
2582 
2583 #ifdef VMA_IMPLEMENTATION
2584 #undef VMA_IMPLEMENTATION
2585 
2586 #include <cstdint>
2587 #include <cstdlib>
2588 #include <cstring>
2589 
2590 /*******************************************************************************
2591 CONFIGURATION SECTION
2592 
2593 Define some of these macros before each #include of this header or change them
2594 here if you need other then default behavior depending on your environment.
2595 */
2596 
2597 /*
2598 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2599 internally, like:
2600 
2601  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2602 
2603 Define to 0 if you are going to provide you own pointers to Vulkan functions via
2604 VmaAllocatorCreateInfo::pVulkanFunctions.
2605 */
2606 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2607 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2608 #endif
2609 
2610 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2611 //#define VMA_USE_STL_CONTAINERS 1
2612 
2613 /* Set this macro to 1 to make the library including and using STL containers:
2614 std::pair, std::vector, std::list, std::unordered_map.
2615 
2616 Set it to 0 or undefined to make the library using its own implementation of
2617 the containers.
2618 */
2619 #if VMA_USE_STL_CONTAINERS
2620  #define VMA_USE_STL_VECTOR 1
2621  #define VMA_USE_STL_UNORDERED_MAP 1
2622  #define VMA_USE_STL_LIST 1
2623 #endif
2624 
2625 #if VMA_USE_STL_VECTOR
2626  #include <vector>
2627 #endif
2628 
2629 #if VMA_USE_STL_UNORDERED_MAP
2630  #include <unordered_map>
2631 #endif
2632 
2633 #if VMA_USE_STL_LIST
2634  #include <list>
2635 #endif
2636 
2637 /*
2638 Following headers are used in this CONFIGURATION section only, so feel free to
2639 remove them if not needed.
2640 */
2641 #include <cassert> // for assert
2642 #include <algorithm> // for min, max
2643 #include <mutex> // for std::mutex
2644 #include <atomic> // for std::atomic
2645 
2646 #ifndef VMA_NULL
2647  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2648  #define VMA_NULL nullptr
2649 #endif
2650 
2651 #if defined(__APPLE__) || defined(__ANDROID__)
2652 #include <cstdlib>
2653 void *aligned_alloc(size_t alignment, size_t size)
2654 {
2655  // alignment must be >= sizeof(void*)
2656  if(alignment < sizeof(void*))
2657  {
2658  alignment = sizeof(void*);
2659  }
2660 
2661  void *pointer;
2662  if(posix_memalign(&pointer, alignment, size) == 0)
2663  return pointer;
2664  return VMA_NULL;
2665 }
2666 #endif
2667 
2668 // If your compiler is not compatible with C++11 and definition of
2669 // aligned_alloc() function is missing, uncommeting following line may help:
2670 
2671 //#include <malloc.h>
2672 
2673 // Normal assert to check for programmer's errors, especially in Debug configuration.
2674 #ifndef VMA_ASSERT
2675  #ifdef _DEBUG
2676  #define VMA_ASSERT(expr) assert(expr)
2677  #else
2678  #define VMA_ASSERT(expr)
2679  #endif
2680 #endif
2681 
2682 // Assert that will be called very often, like inside data structures e.g. operator[].
2683 // Making it non-empty can make program slow.
2684 #ifndef VMA_HEAVY_ASSERT
2685  #ifdef _DEBUG
2686  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2687  #else
2688  #define VMA_HEAVY_ASSERT(expr)
2689  #endif
2690 #endif
2691 
2692 #ifndef VMA_ALIGN_OF
2693  #define VMA_ALIGN_OF(type) (__alignof(type))
2694 #endif
2695 
2696 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2697  #if defined(_WIN32)
2698  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2699  #else
2700  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2701  #endif
2702 #endif
2703 
2704 #ifndef VMA_SYSTEM_FREE
2705  #if defined(_WIN32)
2706  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2707  #else
2708  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2709  #endif
2710 #endif
2711 
2712 #ifndef VMA_MIN
2713  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2714 #endif
2715 
2716 #ifndef VMA_MAX
2717  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2718 #endif
2719 
2720 #ifndef VMA_SWAP
2721  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2722 #endif
2723 
2724 #ifndef VMA_SORT
2725  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2726 #endif
2727 
2728 #ifndef VMA_DEBUG_LOG
2729  #define VMA_DEBUG_LOG(format, ...)
2730  /*
2731  #define VMA_DEBUG_LOG(format, ...) do { \
2732  printf(format, __VA_ARGS__); \
2733  printf("\n"); \
2734  } while(false)
2735  */
2736 #endif
2737 
2738 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2739 #if VMA_STATS_STRING_ENABLED
2740  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2741  {
2742  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2743  }
2744  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2745  {
2746  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2747  }
2748  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2749  {
2750  snprintf(outStr, strLen, "%p", ptr);
2751  }
2752 #endif
2753 
2754 #ifndef VMA_MUTEX
2755  class VmaMutex
2756  {
2757  public:
2758  VmaMutex() { }
2759  ~VmaMutex() { }
2760  void Lock() { m_Mutex.lock(); }
2761  void Unlock() { m_Mutex.unlock(); }
2762  private:
2763  std::mutex m_Mutex;
2764  };
2765  #define VMA_MUTEX VmaMutex
2766 #endif
2767 
2768 /*
2769 If providing your own implementation, you need to implement a subset of std::atomic:
2770 
2771 - Constructor(uint32_t desired)
2772 - uint32_t load() const
2773 - void store(uint32_t desired)
2774 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2775 */
2776 #ifndef VMA_ATOMIC_UINT32
2777  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2778 #endif
2779 
2780 #ifndef VMA_BEST_FIT
2781 
2793  #define VMA_BEST_FIT (1)
2794 #endif
2795 
2796 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2797 
2801  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2802 #endif
2803 
2804 #ifndef VMA_DEBUG_ALIGNMENT
2805 
2809  #define VMA_DEBUG_ALIGNMENT (1)
2810 #endif
2811 
2812 #ifndef VMA_DEBUG_MARGIN
2813 
2817  #define VMA_DEBUG_MARGIN (0)
2818 #endif
2819 
2820 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2821 
2825  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2826 #endif
2827 
2828 #ifndef VMA_DEBUG_DETECT_CORRUPTION
2829 
2834  #define VMA_DEBUG_DETECT_CORRUPTION (0)
2835 #endif
2836 
2837 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2838 
2842  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2843 #endif
2844 
2845 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2846 
2850  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2851 #endif
2852 
2853 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2854  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2856 #endif
2857 
2858 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2859  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2861 #endif
2862 
2863 #ifndef VMA_CLASS_NO_COPY
2864  #define VMA_CLASS_NO_COPY(className) \
2865  private: \
2866  className(const className&) = delete; \
2867  className& operator=(const className&) = delete;
2868 #endif
2869 
2870 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2871 
2872 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
2873 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
2874 
2875 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
2876 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
2877 
2878 /*******************************************************************************
2879 END OF CONFIGURATION
2880 */
2881 
2882 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2883  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2884 
2885 // Returns number of bits set to 1 in (v).
2886 static inline uint32_t VmaCountBitsSet(uint32_t v)
2887 {
2888  uint32_t c = v - ((v >> 1) & 0x55555555);
2889  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
2890  c = ((c >> 4) + c) & 0x0F0F0F0F;
2891  c = ((c >> 8) + c) & 0x00FF00FF;
2892  c = ((c >> 16) + c) & 0x0000FFFF;
2893  return c;
2894 }
2895 
2896 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
2897 // Use types like uint32_t, uint64_t as T.
2898 template <typename T>
2899 static inline T VmaAlignUp(T val, T align)
2900 {
2901  return (val + align - 1) / align * align;
2902 }
2903 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
2904 // Use types like uint32_t, uint64_t as T.
2905 template <typename T>
2906 static inline T VmaAlignDown(T val, T align)
2907 {
2908  return val / align * align;
2909 }
2910 
2911 // Division with mathematical rounding to nearest number.
2912 template <typename T>
2913 inline T VmaRoundDiv(T x, T y)
2914 {
2915  return (x + (y / (T)2)) / y;
2916 }
2917 
2918 static inline bool VmaStrIsEmpty(const char* pStr)
2919 {
2920  return pStr == VMA_NULL || *pStr == '\0';
2921 }
2922 
2923 #ifndef VMA_SORT
2924 
2925 template<typename Iterator, typename Compare>
2926 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
2927 {
2928  Iterator centerValue = end; --centerValue;
2929  Iterator insertIndex = beg;
2930  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
2931  {
2932  if(cmp(*memTypeIndex, *centerValue))
2933  {
2934  if(insertIndex != memTypeIndex)
2935  {
2936  VMA_SWAP(*memTypeIndex, *insertIndex);
2937  }
2938  ++insertIndex;
2939  }
2940  }
2941  if(insertIndex != centerValue)
2942  {
2943  VMA_SWAP(*insertIndex, *centerValue);
2944  }
2945  return insertIndex;
2946 }
2947 
2948 template<typename Iterator, typename Compare>
2949 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2950 {
2951  if(beg < end)
2952  {
2953  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2954  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2955  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2956  }
2957 }
2958 
2959 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2960 
2961 #endif // #ifndef VMA_SORT
2962 
2963 /*
2964 Returns true if two memory blocks occupy overlapping pages.
2965 ResourceA must be in less memory offset than ResourceB.
2966 
2967 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2968 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2969 */
2970 static inline bool VmaBlocksOnSamePage(
2971  VkDeviceSize resourceAOffset,
2972  VkDeviceSize resourceASize,
2973  VkDeviceSize resourceBOffset,
2974  VkDeviceSize pageSize)
2975 {
2976  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2977  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2978  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2979  VkDeviceSize resourceBStart = resourceBOffset;
2980  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2981  return resourceAEndPage == resourceBStartPage;
2982 }
2983 
2984 enum VmaSuballocationType
2985 {
2986  VMA_SUBALLOCATION_TYPE_FREE = 0,
2987  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
2988  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
2989  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
2990  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
2991  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
2992  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
2993 };
2994 
2995 /*
2996 Returns true if given suballocation types could conflict and must respect
2997 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
2998 or linear image and another one is optimal image. If type is unknown, behave
2999 conservatively.
3000 */
3001 static inline bool VmaIsBufferImageGranularityConflict(
3002  VmaSuballocationType suballocType1,
3003  VmaSuballocationType suballocType2)
3004 {
3005  if(suballocType1 > suballocType2)
3006  {
3007  VMA_SWAP(suballocType1, suballocType2);
3008  }
3009 
3010  switch(suballocType1)
3011  {
3012  case VMA_SUBALLOCATION_TYPE_FREE:
3013  return false;
3014  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3015  return true;
3016  case VMA_SUBALLOCATION_TYPE_BUFFER:
3017  return
3018  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3019  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3020  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3021  return
3022  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3023  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3024  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3025  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3026  return
3027  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3028  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3029  return false;
3030  default:
3031  VMA_ASSERT(0);
3032  return true;
3033  }
3034 }
3035 
3036 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3037 {
3038  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3039  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3040  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3041  {
3042  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3043  }
3044 }
3045 
3046 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3047 {
3048  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3049  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3050  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3051  {
3052  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3053  {
3054  return false;
3055  }
3056  }
3057  return true;
3058 }
3059 
3060 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3061 struct VmaMutexLock
3062 {
3063  VMA_CLASS_NO_COPY(VmaMutexLock)
3064 public:
3065  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3066  m_pMutex(useMutex ? &mutex : VMA_NULL)
3067  {
3068  if(m_pMutex)
3069  {
3070  m_pMutex->Lock();
3071  }
3072  }
3073 
3074  ~VmaMutexLock()
3075  {
3076  if(m_pMutex)
3077  {
3078  m_pMutex->Unlock();
3079  }
3080  }
3081 
3082 private:
3083  VMA_MUTEX* m_pMutex;
3084 };
3085 
3086 #if VMA_DEBUG_GLOBAL_MUTEX
3087  static VMA_MUTEX gDebugGlobalMutex;
3088  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3089 #else
3090  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3091 #endif
3092 
3093 // Minimum size of a free suballocation to register it in the free suballocation collection.
3094 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3095 
3096 /*
3097 Performs binary search and returns iterator to first element that is greater or
3098 equal to (key), according to comparison (cmp).
3099 
3100 Cmp should return true if first argument is less than second argument.
3101 
3102 Returned value is the found element, if present in the collection or place where
3103 new element with value (key) should be inserted.
3104 */
3105 template <typename CmpLess, typename IterT, typename KeyT>
3106 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3107 {
3108  size_t down = 0, up = (end - beg);
3109  while(down < up)
3110  {
3111  const size_t mid = (down + up) / 2;
3112  if(cmp(*(beg+mid), key))
3113  {
3114  down = mid + 1;
3115  }
3116  else
3117  {
3118  up = mid;
3119  }
3120  }
3121  return beg + down;
3122 }
3123 
3125 // Memory allocation
3126 
3127 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3128 {
3129  if((pAllocationCallbacks != VMA_NULL) &&
3130  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3131  {
3132  return (*pAllocationCallbacks->pfnAllocation)(
3133  pAllocationCallbacks->pUserData,
3134  size,
3135  alignment,
3136  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3137  }
3138  else
3139  {
3140  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3141  }
3142 }
3143 
3144 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3145 {
3146  if((pAllocationCallbacks != VMA_NULL) &&
3147  (pAllocationCallbacks->pfnFree != VMA_NULL))
3148  {
3149  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3150  }
3151  else
3152  {
3153  VMA_SYSTEM_FREE(ptr);
3154  }
3155 }
3156 
3157 template<typename T>
3158 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3159 {
3160  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3161 }
3162 
3163 template<typename T>
3164 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3165 {
3166  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3167 }
3168 
3169 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3170 
3171 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3172 
3173 template<typename T>
3174 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3175 {
3176  ptr->~T();
3177  VmaFree(pAllocationCallbacks, ptr);
3178 }
3179 
3180 template<typename T>
3181 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3182 {
3183  if(ptr != VMA_NULL)
3184  {
3185  for(size_t i = count; i--; )
3186  {
3187  ptr[i].~T();
3188  }
3189  VmaFree(pAllocationCallbacks, ptr);
3190  }
3191 }
3192 
3193 // STL-compatible allocator.
3194 template<typename T>
3195 class VmaStlAllocator
3196 {
3197 public:
3198  const VkAllocationCallbacks* const m_pCallbacks;
3199  typedef T value_type;
3200 
3201  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3202  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3203 
3204  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3205  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3206 
3207  template<typename U>
3208  bool operator==(const VmaStlAllocator<U>& rhs) const
3209  {
3210  return m_pCallbacks == rhs.m_pCallbacks;
3211  }
3212  template<typename U>
3213  bool operator!=(const VmaStlAllocator<U>& rhs) const
3214  {
3215  return m_pCallbacks != rhs.m_pCallbacks;
3216  }
3217 
3218  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3219 };
3220 
3221 #if VMA_USE_STL_VECTOR
3222 
3223 #define VmaVector std::vector
3224 
3225 template<typename T, typename allocatorT>
3226 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3227 {
3228  vec.insert(vec.begin() + index, item);
3229 }
3230 
3231 template<typename T, typename allocatorT>
3232 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3233 {
3234  vec.erase(vec.begin() + index);
3235 }
3236 
3237 #else // #if VMA_USE_STL_VECTOR
3238 
3239 /* Class with interface compatible with subset of std::vector.
3240 T must be POD because constructors and destructors are not called and memcpy is
3241 used for these objects. */
3242 template<typename T, typename AllocatorT>
3243 class VmaVector
3244 {
3245 public:
3246  typedef T value_type;
3247 
3248  VmaVector(const AllocatorT& allocator) :
3249  m_Allocator(allocator),
3250  m_pArray(VMA_NULL),
3251  m_Count(0),
3252  m_Capacity(0)
3253  {
3254  }
3255 
3256  VmaVector(size_t count, const AllocatorT& allocator) :
3257  m_Allocator(allocator),
3258  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3259  m_Count(count),
3260  m_Capacity(count)
3261  {
3262  }
3263 
3264  VmaVector(const VmaVector<T, AllocatorT>& src) :
3265  m_Allocator(src.m_Allocator),
3266  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3267  m_Count(src.m_Count),
3268  m_Capacity(src.m_Count)
3269  {
3270  if(m_Count != 0)
3271  {
3272  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3273  }
3274  }
3275 
3276  ~VmaVector()
3277  {
3278  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3279  }
3280 
3281  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3282  {
3283  if(&rhs != this)
3284  {
3285  resize(rhs.m_Count);
3286  if(m_Count != 0)
3287  {
3288  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3289  }
3290  }
3291  return *this;
3292  }
3293 
3294  bool empty() const { return m_Count == 0; }
3295  size_t size() const { return m_Count; }
3296  T* data() { return m_pArray; }
3297  const T* data() const { return m_pArray; }
3298 
3299  T& operator[](size_t index)
3300  {
3301  VMA_HEAVY_ASSERT(index < m_Count);
3302  return m_pArray[index];
3303  }
3304  const T& operator[](size_t index) const
3305  {
3306  VMA_HEAVY_ASSERT(index < m_Count);
3307  return m_pArray[index];
3308  }
3309 
3310  T& front()
3311  {
3312  VMA_HEAVY_ASSERT(m_Count > 0);
3313  return m_pArray[0];
3314  }
3315  const T& front() const
3316  {
3317  VMA_HEAVY_ASSERT(m_Count > 0);
3318  return m_pArray[0];
3319  }
3320  T& back()
3321  {
3322  VMA_HEAVY_ASSERT(m_Count > 0);
3323  return m_pArray[m_Count - 1];
3324  }
3325  const T& back() const
3326  {
3327  VMA_HEAVY_ASSERT(m_Count > 0);
3328  return m_pArray[m_Count - 1];
3329  }
3330 
3331  void reserve(size_t newCapacity, bool freeMemory = false)
3332  {
3333  newCapacity = VMA_MAX(newCapacity, m_Count);
3334 
3335  if((newCapacity < m_Capacity) && !freeMemory)
3336  {
3337  newCapacity = m_Capacity;
3338  }
3339 
3340  if(newCapacity != m_Capacity)
3341  {
3342  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3343  if(m_Count != 0)
3344  {
3345  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3346  }
3347  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3348  m_Capacity = newCapacity;
3349  m_pArray = newArray;
3350  }
3351  }
3352 
3353  void resize(size_t newCount, bool freeMemory = false)
3354  {
3355  size_t newCapacity = m_Capacity;
3356  if(newCount > m_Capacity)
3357  {
3358  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3359  }
3360  else if(freeMemory)
3361  {
3362  newCapacity = newCount;
3363  }
3364 
3365  if(newCapacity != m_Capacity)
3366  {
3367  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3368  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3369  if(elementsToCopy != 0)
3370  {
3371  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3372  }
3373  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3374  m_Capacity = newCapacity;
3375  m_pArray = newArray;
3376  }
3377 
3378  m_Count = newCount;
3379  }
3380 
3381  void clear(bool freeMemory = false)
3382  {
3383  resize(0, freeMemory);
3384  }
3385 
3386  void insert(size_t index, const T& src)
3387  {
3388  VMA_HEAVY_ASSERT(index <= m_Count);
3389  const size_t oldCount = size();
3390  resize(oldCount + 1);
3391  if(index < oldCount)
3392  {
3393  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3394  }
3395  m_pArray[index] = src;
3396  }
3397 
3398  void remove(size_t index)
3399  {
3400  VMA_HEAVY_ASSERT(index < m_Count);
3401  const size_t oldCount = size();
3402  if(index < oldCount - 1)
3403  {
3404  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3405  }
3406  resize(oldCount - 1);
3407  }
3408 
3409  void push_back(const T& src)
3410  {
3411  const size_t newIndex = size();
3412  resize(newIndex + 1);
3413  m_pArray[newIndex] = src;
3414  }
3415 
3416  void pop_back()
3417  {
3418  VMA_HEAVY_ASSERT(m_Count > 0);
3419  resize(size() - 1);
3420  }
3421 
3422  void push_front(const T& src)
3423  {
3424  insert(0, src);
3425  }
3426 
3427  void pop_front()
3428  {
3429  VMA_HEAVY_ASSERT(m_Count > 0);
3430  remove(0);
3431  }
3432 
3433  typedef T* iterator;
3434 
3435  iterator begin() { return m_pArray; }
3436  iterator end() { return m_pArray + m_Count; }
3437 
3438 private:
3439  AllocatorT m_Allocator;
3440  T* m_pArray;
3441  size_t m_Count;
3442  size_t m_Capacity;
3443 };
3444 
3445 template<typename T, typename allocatorT>
3446 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
3447 {
3448  vec.insert(index, item);
3449 }
3450 
3451 template<typename T, typename allocatorT>
3452 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
3453 {
3454  vec.remove(index);
3455 }
3456 
3457 #endif // #if VMA_USE_STL_VECTOR
3458 
// Inserts value into a vector kept sorted by CmpLess, preserving order.
// Returns the index the value was inserted at.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    typename VectorT::value_type* const pBeg = vector.data();
    typename VectorT::value_type* const pEnd = pBeg + vector.size();
    const size_t indexToInsert =
        VmaBinaryFindFirstNotLess(pBeg, pEnd, value, CmpLess()) - pBeg;
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
3470 
// Removes the first element equivalent to value (under CmpLess) from a sorted
// vector. Returns true if an element was removed, false if none was found.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if(it == vector.end())
    {
        return false;
    }
    // Equivalence under a strict weak ordering: neither compares less than the other.
    if(comparator(*it, value) || comparator(value, *it))
    {
        return false;
    }
    const size_t indexToRemove = it - vector.begin();
    VmaVectorRemove(vector, indexToRemove);
    return true;
}
3488 
3489 template<typename CmpLess, typename IterT, typename KeyT>
3490 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3491 {
3492  CmpLess comparator;
3493  typename IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3494  beg, end, value, comparator);
3495  if(it == end ||
3496  !comparator(*it, value) && !comparator(value, *it))
3497  {
3498  return it;
3499  }
3500  return end;
3501 }
3502 
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks. Outstanding pointers returned by Alloc() become invalid.
    void Clear();
    // Returns a pointer to uninitialized storage for one T.
    T* Alloc();
    // Returns storage previously obtained from Alloc() back to its block.
    void Free(T* ptr);

private:
    // Each slot either holds a live T or, while free, the index of the next
    // free slot within the same block (intrusive singly-linked free list).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One contiguous array of m_ItemsPerBlock items plus the head index of its
    // free list (UINT32_MAX means the block is full).
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    // Allocates a new block, appends it to m_ItemBlocks, and returns it.
    ItemBlock& CreateNewBlock();
};
3541 
// Creates an empty pool allocator; no block is allocated until first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
3550 
// Destroys all blocks. Note: T destructors of still-allocated items ARE run,
// because Clear() deletes whole item arrays via vma_delete_array.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
3556 
3557 template<typename T>
3558 void VmaPoolAllocator<T>::Clear()
3559 {
3560  for(size_t i = m_ItemBlocks.size(); i--; )
3561  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3562  m_ItemBlocks.clear();
3563 }
3564 
3565 template<typename T>
3566 T* VmaPoolAllocator<T>::Alloc()
3567 {
3568  for(size_t i = m_ItemBlocks.size(); i--; )
3569  {
3570  ItemBlock& block = m_ItemBlocks[i];
3571  // This block has some free items: Use first one.
3572  if(block.FirstFreeIndex != UINT32_MAX)
3573  {
3574  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3575  block.FirstFreeIndex = pItem->NextFreeIndex;
3576  return &pItem->Value;
3577  }
3578  }
3579 
3580  // No block has free item: Create new one and use it.
3581  ItemBlock& newBlock = CreateNewBlock();
3582  Item* const pItem = &newBlock.pItems[0];
3583  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3584  return &pItem->Value;
3585 }
3586 
// Returns ptr (previously obtained from Alloc) to the free list of the block
// that owns it. Asserts if ptr does not belong to any block.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union. memcpy of the pointer value (T* -> Item*) avoids a
        // cast that would violate strict-aliasing rules.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            // Push the slot onto the front of this block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
3610 
3611 template<typename T>
3612 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3613 {
3614  ItemBlock newBlock = {
3615  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3616 
3617  m_ItemBlocks.push_back(newBlock);
3618 
3619  // Setup singly-linked list of all free items in this block.
3620  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3621  newBlock.pItems[i].NextFreeIndex = i + 1;
3622  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3623  return m_ItemBlocks.back();
3624 }
3625 
3627 // class VmaRawList, VmaList
3628 
3629 #if VMA_USE_STL_LIST
3630 
3631 #define VmaList std::list
3632 
3633 #else // #if VMA_USE_STL_LIST
3634 
// One node of VmaRawList: intrusive doubly-linked list links plus the payload.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // VMA_NULL for the front item.
    VmaListItem* pNext; // VMA_NULL for the back item.
    T Value;
};
3642 
// Doubly linked list.
// Low-level list over VmaListItem nodes drawn from a VmaPoolAllocator.
// Exposes raw node pointers; VmaList below wraps it with STL-like iterators.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Returns all items to the pool allocator and resets to the empty state.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Overloads without a value leave the new item's Value uninitialized.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Node storage.
    ItemType* m_pFront; // VMA_NULL when empty.
    ItemType* m_pBack;  // VMA_NULL when empty.
    size_t m_Count;
};
3687 
// Creates an empty list. Nodes come from an internal pool allocator.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 list nodes per pool block.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
3697 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's destructor frees whole blocks at once instead.
}
3704 
3705 template<typename T>
3706 void VmaRawList<T>::Clear()
3707 {
3708  if(IsEmpty() == false)
3709  {
3710  ItemType* pItem = m_pBack;
3711  while(pItem != VMA_NULL)
3712  {
3713  ItemType* const pPrevItem = pItem->pPrev;
3714  m_ItemAllocator.Free(pItem);
3715  pItem = pPrevItem;
3716  }
3717  m_pFront = VMA_NULL;
3718  m_pBack = VMA_NULL;
3719  m_Count = 0;
3720  }
3721 }
3722 
3723 template<typename T>
3724 VmaListItem<T>* VmaRawList<T>::PushBack()
3725 {
3726  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3727  pNewItem->pNext = VMA_NULL;
3728  if(IsEmpty())
3729  {
3730  pNewItem->pPrev = VMA_NULL;
3731  m_pFront = pNewItem;
3732  m_pBack = pNewItem;
3733  m_Count = 1;
3734  }
3735  else
3736  {
3737  pNewItem->pPrev = m_pBack;
3738  m_pBack->pNext = pNewItem;
3739  m_pBack = pNewItem;
3740  ++m_Count;
3741  }
3742  return pNewItem;
3743 }
3744 
3745 template<typename T>
3746 VmaListItem<T>* VmaRawList<T>::PushFront()
3747 {
3748  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3749  pNewItem->pPrev = VMA_NULL;
3750  if(IsEmpty())
3751  {
3752  pNewItem->pNext = VMA_NULL;
3753  m_pFront = pNewItem;
3754  m_pBack = pNewItem;
3755  m_Count = 1;
3756  }
3757  else
3758  {
3759  pNewItem->pNext = m_pFront;
3760  m_pFront->pPrev = pNewItem;
3761  m_pFront = pNewItem;
3762  ++m_Count;
3763  }
3764  return pNewItem;
3765 }
3766 
3767 template<typename T>
3768 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3769 {
3770  ItemType* const pNewItem = PushBack();
3771  pNewItem->Value = value;
3772  return pNewItem;
3773 }
3774 
3775 template<typename T>
3776 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3777 {
3778  ItemType* const pNewItem = PushFront();
3779  pNewItem->Value = value;
3780  return pNewItem;
3781 }
3782 
3783 template<typename T>
3784 void VmaRawList<T>::PopBack()
3785 {
3786  VMA_HEAVY_ASSERT(m_Count > 0);
3787  ItemType* const pBackItem = m_pBack;
3788  ItemType* const pPrevItem = pBackItem->pPrev;
3789  if(pPrevItem != VMA_NULL)
3790  {
3791  pPrevItem->pNext = VMA_NULL;
3792  }
3793  m_pBack = pPrevItem;
3794  m_ItemAllocator.Free(pBackItem);
3795  --m_Count;
3796 }
3797 
3798 template<typename T>
3799 void VmaRawList<T>::PopFront()
3800 {
3801  VMA_HEAVY_ASSERT(m_Count > 0);
3802  ItemType* const pFrontItem = m_pFront;
3803  ItemType* const pNextItem = pFrontItem->pNext;
3804  if(pNextItem != VMA_NULL)
3805  {
3806  pNextItem->pPrev = VMA_NULL;
3807  }
3808  m_pFront = pNextItem;
3809  m_ItemAllocator.Free(pFrontItem);
3810  --m_Count;
3811 }
3812 
3813 template<typename T>
3814 void VmaRawList<T>::Remove(ItemType* pItem)
3815 {
3816  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
3817  VMA_HEAVY_ASSERT(m_Count > 0);
3818 
3819  if(pItem->pPrev != VMA_NULL)
3820  {
3821  pItem->pPrev->pNext = pItem->pNext;
3822  }
3823  else
3824  {
3825  VMA_HEAVY_ASSERT(m_pFront == pItem);
3826  m_pFront = pItem->pNext;
3827  }
3828 
3829  if(pItem->pNext != VMA_NULL)
3830  {
3831  pItem->pNext->pPrev = pItem->pPrev;
3832  }
3833  else
3834  {
3835  VMA_HEAVY_ASSERT(m_pBack == pItem);
3836  m_pBack = pItem->pPrev;
3837  }
3838 
3839  m_ItemAllocator.Free(pItem);
3840  --m_Count;
3841 }
3842 
3843 template<typename T>
3844 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
3845 {
3846  if(pItem != VMA_NULL)
3847  {
3848  ItemType* const prevItem = pItem->pPrev;
3849  ItemType* const newItem = m_ItemAllocator.Alloc();
3850  newItem->pPrev = prevItem;
3851  newItem->pNext = pItem;
3852  pItem->pPrev = newItem;
3853  if(prevItem != VMA_NULL)
3854  {
3855  prevItem->pNext = newItem;
3856  }
3857  else
3858  {
3859  VMA_HEAVY_ASSERT(m_pFront == pItem);
3860  m_pFront = newItem;
3861  }
3862  ++m_Count;
3863  return newItem;
3864  }
3865  else
3866  return PushBack();
3867 }
3868 
3869 template<typename T>
3870 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3871 {
3872  if(pItem != VMA_NULL)
3873  {
3874  ItemType* const nextItem = pItem->pNext;
3875  ItemType* const newItem = m_ItemAllocator.Alloc();
3876  newItem->pNext = nextItem;
3877  newItem->pPrev = pItem;
3878  pItem->pNext = newItem;
3879  if(nextItem != VMA_NULL)
3880  {
3881  nextItem->pPrev = newItem;
3882  }
3883  else
3884  {
3885  VMA_HEAVY_ASSERT(m_pBack == pItem);
3886  m_pBack = newItem;
3887  }
3888  ++m_Count;
3889  return newItem;
3890  }
3891  else
3892  return PushFront();
3893 }
3894 
3895 template<typename T>
3896 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3897 {
3898  ItemType* const newItem = InsertBefore(pItem);
3899  newItem->Value = value;
3900  return newItem;
3901 }
3902 
3903 template<typename T>
3904 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3905 {
3906  ItemType* const newItem = InsertAfter(pItem);
3907  newItem->Value = value;
3908  return newItem;
3909 }
3910 
// Wrapper over VmaRawList exposing a subset of the std::list interface
// (bidirectional iterators, push_back, insert, erase). AllocatorT is expected
// to be a VmaStlAllocator; only its m_pCallbacks member is used.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator. end() is represented by m_pItem == VMA_NULL,
    // which is why operator-- must special-case it by jumping to Back().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        // Only VmaList itself may construct a positioned iterator.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only bidirectional iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        // Only VmaList itself may construct a positioned const_iterator.
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before `it`; insert(end(), value) appends.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
4095 
4096 #endif // #if VMA_USE_STL_LIST
4097 
4099 // class VmaMap
4100 
// Unused in this version.
// NOTE: the whole VmaMap/VmaPair implementation below is compiled out with
// `#if 0`; it is kept only for potential future use.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

// Minimal std::pair substitute.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Internally a sorted vector with binary search, not a hash table. */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Kept sorted by key so find() can binary-search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

// Orders pairs by key only; second overload allows heterogeneous lookup by key.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

// Inserts pair at the position that keeps m_Vector sorted by key.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

// Binary-searches for key; returns end() when not found.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0
4199 
4201 
class VmaDeviceMemoryBlock;

// Direction of a cache maintenance operation on mapped memory.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4205 
// Internal representation of a single allocation (what a VmaAllocation handle
// points to). It is either a suballocation inside a VmaDeviceMemoryBlock
// (ALLOCATION_TYPE_BLOCK) or a dedicated VkDeviceMemory object
// (ALLOCATION_TYPE_DEDICATED); the two representations share a union below.
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit of m_MapCount marking an allocation created with
    // VMA_ALLOCATION_CREATE_MAPPED_BIT (persistently mapped).
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // Set when pUserData is treated as an owned, heap-copied string.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Constructs an uninitialized allocation; one of the Init* methods below
    // must be called before use.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this as a suballocation of a memory block.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this as an allocation that is already lost
    // (no backing block; m_LastUseFrameIndex must already be VMA_FRAME_INDEX_LOST).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves this allocation to a different block/offset (used by defragmentation).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records the VkBufferUsageFlags/VkImageUsageFlags of the resource bound
    // to this allocation. May be called at most once (asserts otherwise).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Which member is active is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    // Frees the heap-copied user-data string (FLAG_USER_DATA_STRING mode).
    void FreeUserDataString(VmaAllocator hAllocator);
};
4422 
4423 /*
4424 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
4425 allocated memory block or free.
4426 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset of the region from the beginning of the block, in bytes.
    VkDeviceSize size; // Size of the region, in bytes.
    VmaAllocation hAllocation; // Allocation occupying this region; null for free/lost items.
    VmaSuballocationType type; // VmaSuballocationType of the content (free, buffer, image, ...).
};
4434 
4435 // Comparator for offsets.
4436 struct VmaSuballocationOffsetLess
4437 {
4438  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4439  {
4440  return lhs.offset < rhs.offset;
4441  }
4442 };
4443 struct VmaSuballocationOffsetGreater
4444 {
4445  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4446  {
4447  return lhs.offset > rhs.offset;
4448  }
4449 };
4450 
4451 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4452 
4453 // Cost of one additional allocation lost, as equivalent in bytes.
4454 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4455 
4456 /*
4457 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4458 
4459 If canMakeOtherLost was false:
4460 - item points to a FREE suballocation.
4461 - itemsToMakeLostCount is 0.
4462 
4463 If canMakeOtherLost was true:
4464 - item points to first of sequence of suballocations, which are either FREE,
4465  or point to VmaAllocations that can become lost.
4466 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4467  the requested allocation to succeed.
4468 */
struct VmaAllocationRequest
{
    VkDeviceSize offset; // Proposed offset of the new allocation within the block.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item; // See struct-level comment above for meaning per canMakeOtherLost.
    size_t itemsToMakeLostCount;

    // Heuristic cost of fulfilling this request: bytes of existing allocations
    // sacrificed plus a fixed penalty (VMA_LOST_ALLOCATION_COST) per allocation
    // made lost. Lower cost is better when comparing candidate requests.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
4482 
4483 /*
4484 Data structure used for bookkeeping of allocations and unused ranges of memory
4485 in a single VkDeviceMemory block.
4486 */
class VmaBlockMetadata
{
public:
    VmaBlockMetadata() : m_Size(0) { }
    virtual ~VmaBlockMetadata() { }
    // Initializes metadata for a block of given total size. Always call after construction.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    // Size of the largest contiguous free range in the block.
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    // Fills outInfo with statistics of allocations and unused ranges in this block.
    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Accumulates this block's statistics into inoutStats.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations referenced by pAllocationRequest, if still possible
    // given currentFrameIndex/frameInUseCount. Returns false if the request can no
    // longer be fulfilled.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost every allocation in the block that can become lost.
    // Returns the number of allocations made lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // Validates magic values written around allocations. pBlockData is the block's
    // mapped memory. See also VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation.
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
#if VMA_STATS_STRING_ENABLED
    // Helpers for derived classes to write the shared portions of the detailed JSON map.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size; // Total size of the block, in bytes.
};
4563 
// General-purpose block metadata: a list of all suballocations plus a size-sorted
// index of larger free ranges (m_FreeSuballocationsBySize) used to search for space.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Allocated items = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    // See VmaBlockMetadata::CreateAllocationRequest for the contract.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    uint32_t m_FreeCount; // Number of free items in m_Suballocations.
    VkDeviceSize m_SumFreeSize; // Total bytes in free suballocations.
    VmaSuballocationList m_Suballocations; // All suballocations of the block (used and free).
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
4653 
4654 /*
4655 Allocations and their references in internal data structure look like this:
4656 
4657 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4658 
4659  0 +-------+
4660  | |
4661  | |
4662  | |
4663  +-------+
4664  | Alloc | 1st[m_1stNullItemsBeginCount]
4665  +-------+
4666  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4667  +-------+
4668  | ... |
4669  +-------+
4670  | Alloc | 1st[1st.size() - 1]
4671  +-------+
4672  | |
4673  | |
4674  | |
4675 GetSize() +-------+
4676 
4677 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4678 
4679  0 +-------+
4680  | Alloc | 2nd[0]
4681  +-------+
4682  | Alloc | 2nd[1]
4683  +-------+
4684  | ... |
4685  +-------+
4686  | Alloc | 2nd[2nd.size() - 1]
4687  +-------+
4688  | |
4689  | |
4690  | |
4691  +-------+
4692  | Alloc | 1st[m_1stNullItemsBeginCount]
4693  +-------+
4694  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4695  +-------+
4696  | ... |
4697  +-------+
4698  | Alloc | 1st[1st.size() - 1]
4699  +-------+
4700  | |
4701 GetSize() +-------+
4702 
4703 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4704 
4705  0 +-------+
4706  | |
4707  | |
4708  | |
4709  +-------+
4710  | Alloc | 1st[m_1stNullItemsBeginCount]
4711  +-------+
4712  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4713  +-------+
4714  | ... |
4715  +-------+
4716  | Alloc | 1st[1st.size() - 1]
4717  +-------+
4718  | |
4719  | |
4720  | |
4721  +-------+
4722  | Alloc | 2nd[2nd.size() - 1]
4723  +-------+
4724  | ... |
4725  +-------+
4726  | Alloc | 2nd[1]
4727  +-------+
4728  | Alloc | 2nd[0]
4729 GetSize() +-------+
4730 
4731 */
// Linear-algorithm block metadata: supports stack, double stack, and ring-buffer
// usage patterns. See the big diagram comment above for the data layout.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    // See VmaBlockMetadata::CreateAllocationRequest for the contract.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize; // Total free bytes in the block.
    SuballocationVectorType m_Suballocations0, m_Suballocations1; // Physical storage for the two vectors.
    uint32_t m_1stVectorIndex; // Which of the two vectors currently plays the role of "1st".
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Role-based accessors - always use these instead of m_Suballocations0/1 directly.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    // True when enough null items have accumulated to justify compacting the 1st vector.
    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
4829 
4830 /*
4831 Represents a single block of device memory (`VkDeviceMemory`) with all the
4832 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
4833 
4834 Thread-safety: This class must be externally synchronized.
4835 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block (generic or linear algorithm).
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        bool linearAlgorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the block's memory, adding `count` to the map reference count. ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    // Subtracts `count` from the map reference count (counterpart of Map).
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Corruption-detection helpers: write/verify magic values around a suballocation.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex; // Vulkan memory type index this block was allocated from.
    uint32_t m_Id; // Id assigned at Init().
    VkDeviceMemory m_hMemory; // The underlying Vulkan memory object.

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount; // Map reference count - must be 0 on destruction (see destructor assert).
    void* m_pMappedData; // Pointer to the mapped memory; meaningful only while mapped.
};
4898 
// Comparator yielding a total order over arbitrary pointers, for sorted containers.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        // Raw `<` on pointers into different objects is unspecified in C++;
        // compare the integer representations for a well-defined total order
        // (equivalent to std::less<const void*> on common platforms).
        return reinterpret_cast<uintptr_t>(lhs) < reinterpret_cast<uintptr_t>(rhs);
    }
};
4906 
4907 class VmaDefragmentator;
4908 
4909 /*
4910 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
4911 Vulkan memory type.
4912 
4913 Synchronized internally with a mutex.
4914 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool linearAlgorithm);
    ~VmaBlockVector();

    // Creates the initial blocks so the vector holds at least m_MinBlockCount of them.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    bool UsesLinearAlgorithm() const { return m_LinearAlgorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates from an existing block or creates a new one, respecting createInfo.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    // Makes lost all eligible allocations in all blocks; adds their number to *pLostAllocationCount.
    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Returns the existing defragmentator or lazily creates one for this vector.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    // Runs defragmentation within the given move budgets; decrements them by work done.
    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount; // Lower bound on number of blocks kept alive.
    const size_t m_MaxBlockCount; // Upper bound on number of blocks created.
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool; // True when owned by a VmaPool_T, false for default pools.
    const bool m_LinearAlgorithm; // True when blocks use VmaBlockMetadata_Linear.
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex; // Internal synchronization (see struct-level comment).
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator; // Created lazily by EnsureDefragmentator.
    uint32_t m_NextBlockId; // Id to assign to the next created block.

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
5012 
// Custom memory pool - the implementation behind the public VmaPool handle.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    // The blocks belonging to this pool.
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned only once - asserts it was not set before.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id; // 0 until assigned via SetId().
};
5034 
// Moves allocations between blocks of a single VmaBlockVector to reduce fragmentation.
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    const VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    VkDeviceSize m_BytesMoved; // Running total, reported via GetBytesMoved().
    uint32_t m_AllocationsMoved; // Running total, reported via GetAllocationsMoved().

    // One allocation registered for defragmentation, with an optional output flag.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged; // Optional; presumably set when the allocation is moved - see Defragment impl.

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by descending allocation size.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block bookkeeping used during a defragmentation pass.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when it contains more allocations
        // than were registered for defragmentation in m_Allocations.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE: "Descecnding" typo is part of the established name; kept as-is.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous comparison: BlockInfo* against a raw block pointer or another BlockInfo*.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one round of moves within the given budgets.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    // Heuristic deciding whether moving an allocation from src to dst is worthwhile.
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation to be considered for defragmentation.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    // Runs defragmentation over the registered allocations within the given budgets.
    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
5164 
5165 #if VMA_RECORDING_ENABLED
5166 
// Records VMA API calls to a file for later analysis/replay (e.g. by VmaReplay).
// Compiled in only when VMA_RECORDING_ENABLED.
class VmaRecorder
{
public:
    VmaRecorder();
    // Opens the recording file per settings; useMutex enables synchronization of
    // writes from multiple threads (see m_FileMutex).
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes device/extension configuration to the recording.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per recorded VMA entry point; each entry is stamped
    // with the given frameIndex.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Thread id and timestamp captured at the moment of a recorded call.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Presents user data for output: either the user's string or a textual
    // rendering of the pointer (held in m_PtrStr).
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17]; // Room for 16 hex digits + terminator - TODO confirm format.
        const char* m_Str; // Points either into m_PtrStr or at the user's string.
    };

    bool m_UseMutex; // Whether m_FileMutex is actually used.
    VmaRecordFlags m_Flags;
    FILE* m_File; // Output file handle.
    VMA_MUTEX m_FileMutex; // Guards writes to m_File when m_UseMutex is true.
    int64_t m_Freq; // Timer frequency, for converting counter ticks to seconds.
    int64_t m_StartCounter; // Timer value captured at initialization.

    // Fills outParams with the calling thread's id and current time.
    void GetBasicParams(CallParams& outParams);
    void Flush();
};
5262 
5263 #endif // #if VMA_RECORDING_ENABLED
5264 
5265 // Main allocator object.
5266 struct VmaAllocator_T
5267 {
5268  VMA_CLASS_NO_COPY(VmaAllocator_T)
5269 public:
5270  bool m_UseMutex;
5271  bool m_UseKhrDedicatedAllocation;
5272  VkDevice m_hDevice;
5273  bool m_AllocationCallbacksSpecified;
5274  VkAllocationCallbacks m_AllocationCallbacks;
5275  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5276 
5277  // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
5278  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5279  VMA_MUTEX m_HeapSizeLimitMutex;
5280 
5281  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5282  VkPhysicalDeviceMemoryProperties m_MemProps;
5283 
5284  // Default pools.
5285  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5286 
5287  // Each vector is sorted by memory (handle value).
5288  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5289  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5290  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5291 
5292  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5293  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5294  ~VmaAllocator_T();
5295 
5296  const VkAllocationCallbacks* GetAllocationCallbacks() const
5297  {
5298  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5299  }
5300  const VmaVulkanFunctions& GetVulkanFunctions() const
5301  {
5302  return m_VulkanFunctions;
5303  }
5304 
5305  VkDeviceSize GetBufferImageGranularity() const
5306  {
5307  return VMA_MAX(
5308  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5309  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5310  }
5311 
5312  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5313  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5314 
5315  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5316  {
5317  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5318  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5319  }
5320  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5321  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5322  {
5323  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5324  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5325  }
5326  // Minimum alignment for all allocations in specific memory type.
// Non-coherent memory is additionally aligned to nonCoherentAtomSize so that
// flush/invalidate ranges can later be rounded to atom boundaries safely.
5327  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5328  {
5329  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5330  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5331  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5332  }
5333 
// True for integrated GPUs, where device and host share physical memory
// (used elsewhere to adjust memory-type selection heuristics).
5334  bool IsIntegratedGpu() const
5335  {
5336  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5337  }
5338 
5339 #if VMA_RECORDING_ENABLED
// Recorder for the call capture/replay feature; compiled in only on demand.
5340  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5341 #endif
5342 
5343  void GetBufferMemoryRequirements(
5344  VkBuffer hBuffer,
5345  VkMemoryRequirements& memReq,
5346  bool& requiresDedicatedAllocation,
5347  bool& prefersDedicatedAllocation) const;
5348  void GetImageMemoryRequirements(
5349  VkImage hImage,
5350  VkMemoryRequirements& memReq,
5351  bool& requiresDedicatedAllocation,
5352  bool& prefersDedicatedAllocation) const;
5353 
5354  // Main allocation function.
5355  VkResult AllocateMemory(
5356  const VkMemoryRequirements& vkMemReq,
5357  bool requiresDedicatedAllocation,
5358  bool prefersDedicatedAllocation,
5359  VkBuffer dedicatedBuffer,
5360  VkImage dedicatedImage,
5361  const VmaAllocationCreateInfo& createInfo,
5362  VmaSuballocationType suballocType,
5363  VmaAllocation* pAllocation);
5364 
5365  // Main deallocation function.
5366  void FreeMemory(const VmaAllocation allocation);
5367 
5368  void CalculateStats(VmaStats* pStats);
5369 
5370 #if VMA_STATS_STRING_ENABLED
5371  void PrintDetailedMap(class VmaJsonWriter& json);
5372 #endif
5373 
5374  VkResult Defragment(
5375  VmaAllocation* pAllocations,
5376  size_t allocationCount,
5377  VkBool32* pAllocationsChanged,
5378  const VmaDefragmentationInfo* pDefragmentationInfo,
5379  VmaDefragmentationStats* pDefragmentationStats);
5380 
5381  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5382  bool TouchAllocation(VmaAllocation hAllocation);
5383 
5384  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5385  void DestroyPool(VmaPool pool);
5386  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5387 
5388  void SetCurrentFrameIndex(uint32_t frameIndex);
5389  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5390 
5391  void MakePoolAllocationsLost(
5392  VmaPool hPool,
5393  size_t* pLostAllocationCount);
5394  VkResult CheckPoolCorruption(VmaPool hPool);
5395  VkResult CheckCorruption(uint32_t memoryTypeBits);
5396 
5397  void CreateLostAllocation(VmaAllocation* pAllocation);
5398 
5399  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5400  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5401 
5402  VkResult Map(VmaAllocation hAllocation, void** ppData);
5403  void Unmap(VmaAllocation hAllocation);
5404 
5405  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5406  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5407 
5408  void FlushOrInvalidateAllocation(
5409  VmaAllocation hAllocation,
5410  VkDeviceSize offset, VkDeviceSize size,
5411  VMA_CACHE_OPERATION op);
5412 
5413  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5414 
5415 private:
5416  VkDeviceSize m_PreferredLargeHeapBlockSize;
5417 
5418  VkPhysicalDevice m_PhysicalDevice;
5419  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5420 
5421  VMA_MUTEX m_PoolsMutex;
5422  // Protected by m_PoolsMutex. Sorted by pointer value.
5423  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5424  uint32_t m_NextPoolId;
5425 
5426  VmaVulkanFunctions m_VulkanFunctions;
5427 
5428 #if VMA_RECORDING_ENABLED
5429  VmaRecorder* m_pRecorder;
5430 #endif
5431 
5432  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5433 
5434  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5435 
5436  VkResult AllocateMemoryOfType(
5437  VkDeviceSize size,
5438  VkDeviceSize alignment,
5439  bool dedicatedAllocation,
5440  VkBuffer dedicatedBuffer,
5441  VkImage dedicatedImage,
5442  const VmaAllocationCreateInfo& createInfo,
5443  uint32_t memTypeIndex,
5444  VmaSuballocationType suballocType,
5445  VmaAllocation* pAllocation);
5446 
5447  // Allocates and registers new VkDeviceMemory specifically for single allocation.
5448  VkResult AllocateDedicatedMemory(
5449  VkDeviceSize size,
5450  VmaSuballocationType suballocType,
5451  uint32_t memTypeIndex,
5452  bool map,
5453  bool isUserDataString,
5454  void* pUserData,
5455  VkBuffer dedicatedBuffer,
5456  VkImage dedicatedImage,
5457  VmaAllocation* pAllocation);
5458 
5459  // Frees an allocation previously made as dedicated memory. The allocation must be registered in m_pDedicatedAllocations.
5460  void FreeDedicatedMemory(VmaAllocation allocation);
5461 };
5462 
5464 // Memory allocation #2 after VmaAllocator_T definition
5465 
// Allocates size bytes with the given alignment through the allocator's
// VkAllocationCallbacks (forwards to the callback-based VmaMalloc overload).
5466 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5467 {
5468  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5469 }
5470 
// Releases memory obtained from VmaMalloc(hAllocator, ...) through the same
// VkAllocationCallbacks.
5471 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5472 {
5473  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5474 }
5475 
// Allocates raw, uninitialized storage for a single T. No constructor runs;
// callers pair this with placement-new (vma_new) and vma_delete.
5476 template<typename T>
5477 static T* VmaAllocate(VmaAllocator hAllocator)
5478 {
5479  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T))
5480 }
5481 
// Allocates raw storage for count objects of T (no constructors run).
// NOTE(review): sizeof(T) * count is not checked for overflow — callers are
// internal and trusted to pass sane counts; confirm if exposed more widely.
5482 template<typename T>
5483 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5484 {
5485  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5486 }
5487 
// Destroys *ptr and releases its memory. Safe to call with VMA_NULL.
5488 template<typename T>
5489 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5490 {
5491  if(ptr != VMA_NULL)
5492  {
5493  ptr->~T();
5494  VmaFree(hAllocator, ptr);
5495  }
5496 }
5497 
// Destroys count elements in reverse order (mirroring construction order),
// then frees the array. Safe to call with VMA_NULL.
5498 template<typename T>
5499 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5500 {
5501  if(ptr != VMA_NULL)
5502  {
5503  for(size_t i = count; i--; )
5504  ptr[i].~T();
5505  VmaFree(hAllocator, ptr);
5506  }
5507 }
5508 
5510 // VmaStringBuilder
5511 
5512 #if VMA_STATS_STRING_ENABLED
5513 
// Minimal growable character buffer used to build the statistics string.
// The stored data is NOT null-terminated; use GetLength() with GetData().
5514 class VmaStringBuilder
5515 {
5516 public:
5517  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
5518  size_t GetLength() const { return m_Data.size(); }
5519  const char* GetData() const { return m_Data.data(); }
5520 
5521  void Add(char ch) { m_Data.push_back(ch); }
5522  void Add(const char* pStr);
5523  void AddNewLine() { Add('\n'); }
5524  void AddNumber(uint32_t num);
5525  void AddNumber(uint64_t num);
5526  void AddPointer(const void* ptr);
5527 
5528 private:
// Character storage; grows through the allocator's allocation callbacks.
5529  VmaVector< char, VmaStlAllocator<char> > m_Data;
5530 };
5531 
5532 void VmaStringBuilder::Add(const char* pStr)
5533 {
5534  const size_t strLen = strlen(pStr);
5535  if(strLen > 0)
5536  {
5537  const size_t oldCount = m_Data.size();
5538  m_Data.resize(oldCount + strLen);
5539  memcpy(m_Data.data() + oldCount, pStr, strLen);
5540  }
5541 }
5542 
// Appends the decimal representation of a 32-bit unsigned number.
5543 void VmaStringBuilder::AddNumber(uint32_t num)
5544 {
// 10 digits max for uint32_t, plus the terminating null.
5545  char buf[11];
5546  VmaUint32ToStr(buf, sizeof(buf), num);
5547  Add(buf);
5548 }
5549 
// Appends the decimal representation of a 64-bit unsigned number.
5550 void VmaStringBuilder::AddNumber(uint64_t num)
5551 {
// 20 digits max for uint64_t, plus the terminating null.
5552  char buf[21];
5553  VmaUint64ToStr(buf, sizeof(buf), num);
5554  Add(buf);
5555 }
5556 
// Appends a pointer value formatted by VmaPtrToStr; the 21-byte buffer
// accommodates a 64-bit pointer rendering plus the terminating null.
5557 void VmaStringBuilder::AddPointer(const void* ptr)
5558 {
5559  char buf[21];
5560  VmaPtrToStr(buf, sizeof(buf), ptr);
5561  Add(buf);
5562 }
5563 
5564 #endif // #if VMA_STATS_STRING_ENABLED
5565 
5567 // VmaJsonWriter
5568 
5569 #if VMA_STATS_STRING_ENABLED
5570 
// Streaming JSON writer over a VmaStringBuilder, used to produce the stats
// string. Contract (checked with asserts in debug builds):
// - BeginObject/EndObject and BeginArray/EndArray must be balanced.
// - Inside an object, writes alternate key (a string) and value.
// - BeginString / ContinueString* / EndString assemble one string in pieces;
//   no other write may occur while a string is open.
5571 class VmaJsonWriter
5572 {
5573  VMA_CLASS_NO_COPY(VmaJsonWriter)
5574 public:
5575  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
5576  ~VmaJsonWriter();
5577 
5578  void BeginObject(bool singleLine = false);
5579  void EndObject();
5580 
5581  void BeginArray(bool singleLine = false);
5582  void EndArray();
5583 
5584  void WriteString(const char* pStr);
5585  void BeginString(const char* pStr = VMA_NULL);
5586  void ContinueString(const char* pStr);
5587  void ContinueString(uint32_t n);
5588  void ContinueString(uint64_t n);
5589  void ContinueString_Pointer(const void* ptr);
5590  void EndString(const char* pStr = VMA_NULL);
5591 
5592  void WriteNumber(uint32_t n);
5593  void WriteNumber(uint64_t n);
5594  void WriteBool(bool b);
5595  void WriteNull();
5596 
5597 private:
5598  static const char* const INDENT;
5599 
5600  enum COLLECTION_TYPE
5601  {
5602  COLLECTION_TYPE_OBJECT,
5603  COLLECTION_TYPE_ARRAY,
5604  };
// One entry per currently open object/array.
5605  struct StackItem
5606  {
5607  COLLECTION_TYPE type;
// Number of values written so far; in objects, even counts mean a key is next.
5608  uint32_t valueCount;
// When true, no newlines/indentation are emitted inside this collection.
5609  bool singleLineMode;
5610  };
5611 
// Output sink for all emitted characters.
5612  VmaStringBuilder& m_SB;
// Stack of open collections; top describes the innermost one.
5613  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
// True between BeginString and EndString.
5614  bool m_InsideString;
5615 
5616  void BeginValue(bool isString);
5617  void WriteIndent(bool oneLess = false);
5618 };
5619 
// String emitted once per nesting level by WriteIndent().
5620 const char* const VmaJsonWriter::INDENT = " ";
5621 
// The writer starts at nesting depth 0, outside of any string.
5622 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
5623  m_SB(sb),
5624  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
5625  m_InsideString(false)
5626 {
5627 }
5628 
// Debug-verifies the document was fully closed: no open string and every
// BeginObject/BeginArray matched by its End counterpart.
5629 VmaJsonWriter::~VmaJsonWriter()
5630 {
5631  VMA_ASSERT(!m_InsideString);
5632  VMA_ASSERT(m_Stack.empty());
5633 }
5634 
5635 void VmaJsonWriter::BeginObject(bool singleLine)
5636 {
5637  VMA_ASSERT(!m_InsideString);
5638 
5639  BeginValue(false);
5640  m_SB.Add('{');
5641 
5642  StackItem item;
5643  item.type = COLLECTION_TYPE_OBJECT;
5644  item.valueCount = 0;
5645  item.singleLineMode = singleLine;
5646  m_Stack.push_back(item);
5647 }
5648 
// Closes the innermost collection, which must be an object.
5649 void VmaJsonWriter::EndObject()
5650 {
5651  VMA_ASSERT(!m_InsideString);
5652 
// oneLess = true: the closing brace lines up with the line that opened it.
5653  WriteIndent(true);
5654  m_SB.Add('}');
5655 
5656  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
5657  m_Stack.pop_back();
5658 }
5659 
5660 void VmaJsonWriter::BeginArray(bool singleLine)
5661 {
5662  VMA_ASSERT(!m_InsideString);
5663 
5664  BeginValue(false);
5665  m_SB.Add('[');
5666 
5667  StackItem item;
5668  item.type = COLLECTION_TYPE_ARRAY;
5669  item.valueCount = 0;
5670  item.singleLineMode = singleLine;
5671  m_Stack.push_back(item);
5672 }
5673 
// Closes the innermost collection, which must be an array.
5674 void VmaJsonWriter::EndArray()
5675 {
5676  VMA_ASSERT(!m_InsideString);
5677 
// oneLess = true: the closing bracket lines up with the line that opened it.
5678  WriteIndent(true);
5679  m_SB.Add(']');
5680 
5681  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
5682  m_Stack.pop_back();
5683 }
5684 
// Writes a complete, escaped JSON string value (or object key) in one call.
5685 void VmaJsonWriter::WriteString(const char* pStr)
5686 {
5687  BeginString(pStr);
5688  EndString();
5689 }
5690 
// Opens a string value; optional pStr is appended immediately. Further text
// can be appended with ContinueString* until EndString closes the quote.
5691 void VmaJsonWriter::BeginString(const char* pStr)
5692 {
5693  VMA_ASSERT(!m_InsideString);
5694 
5695  BeginValue(true);
5696  m_SB.Add('"');
5697  m_InsideString = true;
5698  if(pStr != VMA_NULL && pStr[0] != '\0')
5699  {
5700  ContinueString(pStr);
5701  }
5702 }
5703 
5704 void VmaJsonWriter::ContinueString(const char* pStr)
5705 {
5706  VMA_ASSERT(m_InsideString);
5707 
5708  const size_t strLen = strlen(pStr);
5709  for(size_t i = 0; i < strLen; ++i)
5710  {
5711  char ch = pStr[i];
5712  if(ch == '\\')
5713  {
5714  m_SB.Add("\\\\");
5715  }
5716  else if(ch == '"')
5717  {
5718  m_SB.Add("\\\"");
5719  }
5720  else if(ch >= 32)
5721  {
5722  m_SB.Add(ch);
5723  }
5724  else switch(ch)
5725  {
5726  case '\b':
5727  m_SB.Add("\\b");
5728  break;
5729  case '\f':
5730  m_SB.Add("\\f");
5731  break;
5732  case '\n':
5733  m_SB.Add("\\n");
5734  break;
5735  case '\r':
5736  m_SB.Add("\\r");
5737  break;
5738  case '\t':
5739  m_SB.Add("\\t");
5740  break;
5741  default:
5742  VMA_ASSERT(0 && "Character not currently supported.");
5743  break;
5744  }
5745  }
5746 }
5747 
// Appends a decimal number to the currently open string value.
5748 void VmaJsonWriter::ContinueString(uint32_t n)
5749 {
5750  VMA_ASSERT(m_InsideString);
5751  m_SB.AddNumber(n);
5752 }
5753 
// Appends a decimal number to the currently open string value.
5754 void VmaJsonWriter::ContinueString(uint64_t n)
5755 {
5756  VMA_ASSERT(m_InsideString);
5757  m_SB.AddNumber(n);
5758 }
5759 
// Appends a formatted pointer value to the currently open string value.
5760 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
5761 {
5762  VMA_ASSERT(m_InsideString);
5763  m_SB.AddPointer(ptr);
5764 }
5765 
// Closes the currently open string value; optional pStr is appended first.
5766 void VmaJsonWriter::EndString(const char* pStr)
5767 {
5768  VMA_ASSERT(m_InsideString);
5769  if(pStr != VMA_NULL && pStr[0] != '\0')
5770  {
5771  ContinueString(pStr);
5772  }
5773  m_SB.Add('"');
5774  m_InsideString = false;
5775 }
5776 
// Writes an unquoted decimal number as a standalone JSON value.
5777 void VmaJsonWriter::WriteNumber(uint32_t n)
5778 {
5779  VMA_ASSERT(!m_InsideString);
5780  BeginValue(false);
5781  m_SB.AddNumber(n);
5782 }
5783 
// Writes an unquoted decimal number as a standalone JSON value.
5784 void VmaJsonWriter::WriteNumber(uint64_t n)
5785 {
5786  VMA_ASSERT(!m_InsideString);
5787  BeginValue(false);
5788  m_SB.AddNumber(n);
5789 }
5790 
5791 void VmaJsonWriter::WriteBool(bool b)
5792 {
5793  VMA_ASSERT(!m_InsideString);
5794  BeginValue(false);
5795  m_SB.Add(b ? "true" : "false");
5796 }
5797 
// Writes the JSON null literal as a standalone value.
5798 void VmaJsonWriter::WriteNull()
5799 {
5800  VMA_ASSERT(!m_InsideString);
5801  BeginValue(false);
5802  m_SB.Add("null");
5803 }
5804 
5805 void VmaJsonWriter::BeginValue(bool isString)
5806 {
5807  if(!m_Stack.empty())
5808  {
5809  StackItem& currItem = m_Stack.back();
5810  if(currItem.type == COLLECTION_TYPE_OBJECT &&
5811  currItem.valueCount % 2 == 0)
5812  {
5813  VMA_ASSERT(isString);
5814  }
5815 
5816  if(currItem.type == COLLECTION_TYPE_OBJECT &&
5817  currItem.valueCount % 2 != 0)
5818  {
5819  m_SB.Add(": ");
5820  }
5821  else if(currItem.valueCount > 0)
5822  {
5823  m_SB.Add(", ");
5824  WriteIndent();
5825  }
5826  else
5827  {
5828  WriteIndent();
5829  }
5830  ++currItem.valueCount;
5831  }
5832 }
5833 
5834 void VmaJsonWriter::WriteIndent(bool oneLess)
5835 {
5836  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
5837  {
5838  m_SB.AddNewLine();
5839 
5840  size_t count = m_Stack.size();
5841  if(count > 0 && oneLess)
5842  {
5843  --count;
5844  }
5845  for(size_t i = 0; i < count; ++i)
5846  {
5847  m_SB.Add(INDENT);
5848  }
5849  }
5850 }
5851 
5852 #endif // #if VMA_STATS_STRING_ENABLED
5853 
5855 
// Sets the allocation's user data. In string mode the allocation owns a copy
// of the passed null-terminated string; otherwise the raw pointer is stored.
5856 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
5857 {
5858  if(IsUserDataString())
5859  {
// Passing the currently stored pointer is forbidden: it is freed below and
// would then be copied from after the free.
5860  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
5861 
5862  FreeUserDataString(hAllocator);
5863 
5864  if(pUserData != VMA_NULL)
5865  {
// Duplicate the string (including terminator) with the allocator's callbacks.
5866  const char* const newStrSrc = (char*)pUserData;
5867  const size_t newStrLen = strlen(newStrSrc);
5868  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
5869  memcpy(newStrDst, newStrSrc, newStrLen + 1);
5870  m_pUserData = newStrDst;
5871  }
5872  }
5873  else
5874  {
5875  m_pUserData = pUserData;
5876  }
5877 }
5878 
// Re-points a block allocation at a new block/offset (used by defragmentation).
// Any outstanding mappings are transferred so reference counts stay balanced.
5879 void VmaAllocation_T::ChangeBlockAllocation(
5880  VmaAllocator hAllocator,
5881  VmaDeviceMemoryBlock* block,
5882  VkDeviceSize offset)
5883 {
5884  VMA_ASSERT(block != VMA_NULL);
5885  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5886 
5887  // Move mapping reference counter from old block to new block.
5888  if(block != m_BlockAllocation.m_Block)
5889  {
// The persistent-map flag counts as one extra mapping reference.
5890  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
5891  if(IsPersistentMap())
5892  ++mapRefCount;
// Order matters: release references on the old block before acquiring them
// on the new one.
5893  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
5894  block->Map(hAllocator, mapRefCount, VMA_NULL);
5895  }
5896 
5897  m_BlockAllocation.m_Block = block;
5898  m_BlockAllocation.m_Offset = offset;
5899 }
5900 
5901 VkDeviceSize VmaAllocation_T::GetOffset() const
5902 {
5903  switch(m_Type)
5904  {
5905  case ALLOCATION_TYPE_BLOCK:
5906  return m_BlockAllocation.m_Offset;
5907  case ALLOCATION_TYPE_DEDICATED:
5908  return 0;
5909  default:
5910  VMA_ASSERT(0);
5911  return 0;
5912  }
5913 }
5914 
5915 VkDeviceMemory VmaAllocation_T::GetMemory() const
5916 {
5917  switch(m_Type)
5918  {
5919  case ALLOCATION_TYPE_BLOCK:
5920  return m_BlockAllocation.m_Block->GetDeviceMemory();
5921  case ALLOCATION_TYPE_DEDICATED:
5922  return m_DedicatedAllocation.m_hMemory;
5923  default:
5924  VMA_ASSERT(0);
5925  return VK_NULL_HANDLE;
5926  }
5927 }
5928 
5929 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
5930 {
5931  switch(m_Type)
5932  {
5933  case ALLOCATION_TYPE_BLOCK:
5934  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
5935  case ALLOCATION_TYPE_DEDICATED:
5936  return m_DedicatedAllocation.m_MemoryTypeIndex;
5937  default:
5938  VMA_ASSERT(0);
5939  return UINT32_MAX;
5940  }
5941 }
5942 
// Returns the CPU pointer for this allocation if it is currently mapped,
// VMA_NULL otherwise.
5943 void* VmaAllocation_T::GetMappedData() const
5944 {
5945  switch(m_Type)
5946  {
5947  case ALLOCATION_TYPE_BLOCK:
5948  if(m_MapCount != 0)
5949  {
// Block allocations map the whole block; add this allocation's offset.
5950  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
5951  VMA_ASSERT(pBlockData != VMA_NULL);
5952  return (char*)pBlockData + m_BlockAllocation.m_Offset;
5953  }
5954  else
5955  {
5956  return VMA_NULL;
5957  }
5958  break;
5959  case ALLOCATION_TYPE_DEDICATED:
// The cached pointer and the map count must agree.
5960  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
5961  return m_DedicatedAllocation.m_pMappedData;
5962  default:
5963  VMA_ASSERT(0);
5964  return VMA_NULL;
5965  }
5966 }
5967 
5968 bool VmaAllocation_T::CanBecomeLost() const
5969 {
5970  switch(m_Type)
5971  {
5972  case ALLOCATION_TYPE_BLOCK:
5973  return m_BlockAllocation.m_CanBecomeLost;
5974  case ALLOCATION_TYPE_DEDICATED:
5975  return false;
5976  default:
5977  VMA_ASSERT(0);
5978  return false;
5979  }
5980 }
5981 
// Returns the custom pool this block allocation belongs to (VK_NULL_HANDLE
// means default pools). Only valid for block allocations.
5982 VmaPool VmaAllocation_T::GetPool() const
5983 {
5984  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5985  return m_BlockAllocation.m_hPool;
5986 }
5987 
// Atomically marks this allocation as lost if it has not been used within the
// last frameInUseCount frames. Returns true on success, false when the
// allocation is still considered in use. Lock-free CAS loop.
5988 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5989 {
5990  VMA_ASSERT(CanBecomeLost());
5991 
5992  /*
5993  Warning: This is a carefully designed algorithm.
5994  Do not modify unless you really know what you're doing :)
5995  */
5996  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
5997  for(;;)
5998  {
5999  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6000  {
// Already lost — callers should not attempt to lose it twice.
6001  VMA_ASSERT(0);
6002  return false;
6003  }
6004  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6005  {
// Used too recently: still within the in-use window.
6006  return false;
6007  }
6008  else // Last use time earlier than current time.
6009  {
// NOTE(review): on CAS failure this presumably refreshes
// localLastUseFrameIndex with the current value and the loop retries —
// confirm against CompareExchangeLastUseFrameIndex's definition.
6010  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6011  {
6012  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6013  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
6014  return true;
6015  }
6016  }
6017  }
6018 }
6019 
6020 #if VMA_STATS_STRING_ENABLED
6021 
6022 // Correspond to values of enum VmaSuballocationType.
// Human-readable names emitted into the JSON stats string. Order must stay
// in sync with enum VmaSuballocationType.
6023 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6024  "FREE",
6025  "UNKNOWN",
6026  "BUFFER",
6027  "IMAGE_UNKNOWN",
6028  "IMAGE_LINEAR",
6029  "IMAGE_OPTIMAL",
6030 };
6031 
// Writes this allocation's key/value parameters into an already-open JSON
// object (callers own BeginObject/EndObject).
6032 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6033 {
6034  json.WriteString("Type");
6035  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6036 
6037  json.WriteString("Size");
6038  json.WriteNumber(m_Size);
6039 
6040  if(m_pUserData != VMA_NULL)
6041  {
6042  json.WriteString("UserData");
6043  if(IsUserDataString())
6044  {
// String mode: the user data is an owned, printable string.
6045  json.WriteString((const char*)m_pUserData);
6046  }
6047  else
6048  {
// Pointer mode: emit the raw pointer value as a string.
6049  json.BeginString();
6050  json.ContinueString_Pointer(m_pUserData);
6051  json.EndString();
6052  }
6053  }
6054 
6055  json.WriteString("CreationFrameIndex");
6056  json.WriteNumber(m_CreationFrameIndex);
6057 
6058  json.WriteString("LastUseFrameIndex");
6059  json.WriteNumber(GetLastUseFrameIndex());
6060 
6061  if(m_BufferImageUsage != 0)
6062  {
6063  json.WriteString("Usage");
6064  json.WriteNumber(m_BufferImageUsage);
6065  }
6066 }
6067 
6068 #endif
6069 
// Frees the owned user-data string (allocated in SetUserData) and clears the
// pointer. Only valid in string mode.
6070 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6071 {
6072  VMA_ASSERT(IsUserDataString());
6073  if(m_pUserData != VMA_NULL)
6074  {
// Length is recomputed; vma_delete_array needs the element count (+1 for the
// terminator, matching the vma_new_array in SetUserData).
6075  char* const oldStr = (char*)m_pUserData;
6076  const size_t oldStrLen = strlen(oldStr);
6077  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6078  m_pUserData = VMA_NULL;
6079  }
6080 }
6081 
// Increments the mapping reference count of a block allocation.
6082 void VmaAllocation_T::BlockAllocMap()
6083 {
6084  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6085 
// The count is stored in the low 7 bits of m_MapCount (the top bit is the
// persistent-map flag), hence the 0x7F ceiling.
6086  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6087  {
6088  ++m_MapCount;
6089  }
6090  else
6091  {
6092  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6093  }
6094 }
6095 
// Decrements the mapping reference count of a block allocation; asserts on
// unbalanced unmap.
6096 void VmaAllocation_T::BlockAllocUnmap()
6097 {
6098  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6099 
// Mask off the persistent-map flag bit; only the counter may reach zero.
6100  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6101  {
6102  --m_MapCount;
6103  }
6104  else
6105  {
6106  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6107  }
6108 }
6109 
// Maps a dedicated allocation. The first map calls vkMapMemory over the whole
// memory object; subsequent maps just bump the reference count and return the
// cached pointer.
6110 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6111 {
6112  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6113 
6114  if(m_MapCount != 0)
6115  {
// Already mapped: reuse the cached pointer if the 7-bit counter has room.
6116  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6117  {
6118  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6119  *ppData = m_DedicatedAllocation.m_pMappedData;
6120  ++m_MapCount;
6121  return VK_SUCCESS;
6122  }
6123  else
6124  {
6125  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6126  return VK_ERROR_MEMORY_MAP_FAILED;
6127  }
6128  }
6129  else
6130  {
6131  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6132  hAllocator->m_hDevice,
6133  m_DedicatedAllocation.m_hMemory,
6134  0, // offset
6135  VK_WHOLE_SIZE,
6136  0, // flags
6137  ppData);
6138  if(result == VK_SUCCESS)
6139  {
6140  m_DedicatedAllocation.m_pMappedData = *ppData;
6141  m_MapCount = 1;
6142  }
6143  return result;
6144  }
6145 }
6146 
// Unmaps a dedicated allocation. vkUnmapMemory is only called when the last
// reference goes away (a persistent map keeps the flag bit set, so the full
// m_MapCount never reaches zero while it is active).
6147 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6148 {
6149  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6150 
6151  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6152  {
6153  --m_MapCount;
6154  if(m_MapCount == 0)
6155  {
6156  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6157  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6158  hAllocator->m_hDevice,
6159  m_DedicatedAllocation.m_hMemory);
6160  }
6161  }
6162  else
6163  {
6164  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6165  }
6166 }
6167 
6168 #if VMA_STATS_STRING_ENABLED
6169 
// Serializes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// emitted only when more than one allocation / unused range exists.
6170 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6171 {
6172  json.BeginObject();
6173 
6174  json.WriteString("Blocks");
6175  json.WriteNumber(stat.blockCount);
6176 
6177  json.WriteString("Allocations");
6178  json.WriteNumber(stat.allocationCount);
6179 
6180  json.WriteString("UnusedRanges");
6181  json.WriteNumber(stat.unusedRangeCount);
6182 
6183  json.WriteString("UsedBytes");
6184  json.WriteNumber(stat.usedBytes);
6185 
6186  json.WriteString("UnusedBytes");
6187  json.WriteNumber(stat.unusedBytes);
6188 
6189  if(stat.allocationCount > 1)
6190  {
6191  json.WriteString("AllocationSize");
6192  json.BeginObject(true);
6193  json.WriteString("Min");
6194  json.WriteNumber(stat.allocationSizeMin);
6195  json.WriteString("Avg");
6196  json.WriteNumber(stat.allocationSizeAvg);
6197  json.WriteString("Max");
6198  json.WriteNumber(stat.allocationSizeMax);
6199  json.EndObject();
6200  }
6201 
6202  if(stat.unusedRangeCount > 1)
6203  {
6204  json.WriteString("UnusedRangeSize");
6205  json.BeginObject(true);
6206  json.WriteString("Min");
6207  json.WriteNumber(stat.unusedRangeSizeMin);
6208  json.WriteString("Avg");
6209  json.WriteNumber(stat.unusedRangeSizeAvg);
6210  json.WriteString("Max");
6211  json.WriteNumber(stat.unusedRangeSizeMax);
6212  json.EndObject();
6213  }
6214 
6215  json.EndObject();
6216 }
6217 
6218 #endif // #if VMA_STATS_STRING_ENABLED
6219 
// Orders suballocation-list iterators by suballocation size. Used to keep
// m_FreeSuballocationsBySize sorted and to binary-search it; the second
// overload allows comparing directly against a size (e.g. in lower_bound).
6220 struct VmaSuballocationItemSizeLess
6221 {
6222  bool operator()(
6223  const VmaSuballocationList::iterator lhs,
6224  const VmaSuballocationList::iterator rhs) const
6225  {
6226  return lhs->size < rhs->size;
6227  }
6228  bool operator()(
6229  const VmaSuballocationList::iterator lhs,
6230  VkDeviceSize rhsSize) const
6231  {
6232  return lhs->size < rhsSize;
6233  }
6234 };
6235 
6236 
6238 // class VmaBlockMetadata
6239 
6240 #if VMA_STATS_STRING_ENABLED
6241 
// Opens the JSON object for one block's detailed map: writes the summary
// fields and leaves the "Suballocations" array open. Callers must emit the
// entries and then call PrintDetailedMap_End.
6242 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6243  VkDeviceSize unusedBytes,
6244  size_t allocationCount,
6245  size_t unusedRangeCount) const
6246 {
6247  json.BeginObject();
6248 
6249  json.WriteString("TotalBytes");
6250  json.WriteNumber(GetSize());
6251 
6252  json.WriteString("UnusedBytes");
6253  json.WriteNumber(unusedBytes);
6254 
6255  json.WriteString("Allocations");
6256  json.WriteNumber((uint64_t)allocationCount);
6257 
6258  json.WriteString("UnusedRanges");
6259  json.WriteNumber((uint64_t)unusedRangeCount);
6260 
6261  json.WriteString("Suballocations");
6262  json.BeginArray();
6263 }
6264 
// Emits one occupied suballocation as a single-line JSON object inside the
// open "Suballocations" array.
6265 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6266  VkDeviceSize offset,
6267  VmaAllocation hAllocation) const
6268 {
6269  json.BeginObject(true);
6270 
6271  json.WriteString("Offset");
6272  json.WriteNumber(offset);
6273 
6274  hAllocation->PrintParameters(json);
6275 
6276  json.EndObject();
6277 }
6278 
// Emits one free range as a single-line JSON object inside the open
// "Suballocations" array.
6279 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6280  VkDeviceSize offset,
6281  VkDeviceSize size) const
6282 {
6283  json.BeginObject(true);
6284 
6285  json.WriteString("Offset");
6286  json.WriteNumber(offset);
6287 
6288  json.WriteString("Type");
6289  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6290 
6291  json.WriteString("Size");
6292  json.WriteNumber(size);
6293 
6294  json.EndObject();
6295 }
6296 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin.
6297 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6298 {
6299  json.EndArray();
6300  json.EndObject();
6301 }
6302 
6303 #endif // #if VMA_STATS_STRING_ENABLED
6304 
6306 // class VmaBlockMetadata_Generic
6307 
// Constructs empty metadata; real state is established by Init(size).
6308 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6309  m_FreeCount(0),
6310  m_SumFreeSize(0),
6311  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6312  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6313 {
6314 }
6315 
// Containers clean up themselves; nothing else to release here.
6316 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6317 {
6318 }
6319 
// Initializes metadata for a block of the given size: one free suballocation
// spanning the whole block, registered in the by-size list.
6320 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6321 {
6322  VmaBlockMetadata::Init(size);
6323  m_FreeCount = 1;
6324  m_SumFreeSize = size;
6325 
6326  VmaSuballocation suballoc = {};
6327  suballoc.offset = 0;
6328  suballoc.size = size;
6329  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6330  suballoc.hAllocation = VK_NULL_HANDLE;
6331 
6332  m_Suballocations.push_back(suballoc);
// Register the iterator to the element just appended (end() - 1).
6333  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6334  --suballocItem;
6335  m_FreeSuballocationsBySize.push_back(suballocItem);
6336 }
6337 
// Debug self-check of all metadata invariants. Returns false on the first
// violation: contiguous coverage of the block, no two adjacent free ranges,
// consistency between suballocations and their VmaAllocation handles, the
// by-size list sorted and containing exactly the registrable free ranges,
// and the cached m_FreeCount / m_SumFreeSize totals.
6338 bool VmaBlockMetadata_Generic::Validate() const
6339 {
6340  if(m_Suballocations.empty())
6341  {
6342  return false;
6343  }
6344 
6345  // Expected offset of new suballocation as calculated from previous ones.
6346  VkDeviceSize calculatedOffset = 0;
6347  // Expected number of free suballocations as calculated from traversing their list.
6348  uint32_t calculatedFreeCount = 0;
6349  // Expected sum size of free suballocations as calculated from traversing their list.
6350  VkDeviceSize calculatedSumFreeSize = 0;
6351  // Expected number of free suballocations that should be registered in
6352  // m_FreeSuballocationsBySize calculated from traversing their list.
6353  size_t freeSuballocationsToRegister = 0;
6354  // True if previous visited suballocation was free.
6355  bool prevFree = false;
6356 
6357  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6358  suballocItem != m_Suballocations.cend();
6359  ++suballocItem)
6360  {
6361  const VmaSuballocation& subAlloc = *suballocItem;
6362 
6363  // Actual offset of this suballocation doesn't match expected one.
6364  if(subAlloc.offset != calculatedOffset)
6365  {
6366  return false;
6367  }
6368 
6369  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6370  // Two adjacent free suballocations are invalid. They should be merged.
6371  if(prevFree && currFree)
6372  {
6373  return false;
6374  }
6375 
// Free ranges must have a null handle; occupied ones a valid handle.
6376  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
6377  {
6378  return false;
6379  }
6380 
6381  if(currFree)
6382  {
6383  calculatedSumFreeSize += subAlloc.size;
6384  ++calculatedFreeCount;
6385  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6386  {
6387  ++freeSuballocationsToRegister;
6388  }
6389 
6390  // Margin required between allocations - every free space must be at least that large.
6391  if(subAlloc.size < VMA_DEBUG_MARGIN)
6392  {
6393  return false;
6394  }
6395  }
6396  else
6397  {
// The allocation handle must agree with this suballocation's placement.
6398  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
6399  {
6400  return false;
6401  }
6402  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
6403  {
6404  return false;
6405  }
6406 
6407  // Margin required between allocations - previous allocation must be free.
6408  if(VMA_DEBUG_MARGIN > 0 && !prevFree)
6409  {
6410  return false;
6411  }
6412  }
6413 
6414  calculatedOffset += subAlloc.size;
6415  prevFree = currFree;
6416  }
6417 
6418  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6419  // match expected one.
6420  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
6421  {
6422  return false;
6423  }
6424 
6425  VkDeviceSize lastSize = 0;
6426  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6427  {
6428  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6429 
6430  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6431  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
6432  {
6433  return false;
6434  }
6435  // They must be sorted by size ascending.
6436  if(suballocItem->size < lastSize)
6437  {
6438  return false;
6439  }
6440 
6441  lastSize = suballocItem->size;
6442  }
6443 
6444  // Check if totals match calculated values.
6445  if(!ValidateFreeSuballocationList() ||
6446  (calculatedOffset != GetSize()) ||
6447  (calculatedSumFreeSize != m_SumFreeSize) ||
6448  (calculatedFreeCount != m_FreeCount))
6449  {
6450  return false;
6451  }
6452 
6453  return true;
6454 }
6455 
6456 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6457 {
6458  if(!m_FreeSuballocationsBySize.empty())
6459  {
6460  return m_FreeSuballocationsBySize.back()->size;
6461  }
6462  else
6463  {
6464  return 0;
6465  }
6466 }
6467 
// The block is empty when its only suballocation is the single free range
// covering the whole block.
6468 bool VmaBlockMetadata_Generic::IsEmpty() const
6469 {
6470  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6471 }
6472 
// Fills outInfo with statistics for this single block by walking all
// suballocations. Min fields start at UINT64_MAX / max at 0 so the VMA_MIN /
// VMA_MAX folding below works from an empty state.
6473 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6474 {
6475  outInfo.blockCount = 1;
6476 
6477  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6478  outInfo.allocationCount = rangeCount - m_FreeCount;
6479  outInfo.unusedRangeCount = m_FreeCount;
6480 
6481  outInfo.unusedBytes = m_SumFreeSize;
6482  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6483 
6484  outInfo.allocationSizeMin = UINT64_MAX;
6485  outInfo.allocationSizeMax = 0;
6486  outInfo.unusedRangeSizeMin = UINT64_MAX;
6487  outInfo.unusedRangeSizeMax = 0;
6488 
6489  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6490  suballocItem != m_Suballocations.cend();
6491  ++suballocItem)
6492  {
6493  const VmaSuballocation& suballoc = *suballocItem;
6494  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6495  {
6496  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6497  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6498  }
6499  else
6500  {
6501  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6502  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6503  }
6504  }
6505 }
6506 
// Accumulates this block's totals into pool-level statistics.
6507 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6508 {
6509  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6510 
6511  inoutStats.size += GetSize();
6512  inoutStats.unusedSize += m_SumFreeSize;
6513  inoutStats.allocationCount += rangeCount - m_FreeCount;
6514  inoutStats.unusedRangeCount += m_FreeCount;
6515  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6516 }
6517 
6518 #if VMA_STATS_STRING_ENABLED
6519 
6520 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6521 {
6522  PrintDetailedMap_Begin(json,
6523  m_SumFreeSize, // unusedBytes
6524  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6525  m_FreeCount); // unusedRangeCount
6526 
6527  size_t i = 0;
6528  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6529  suballocItem != m_Suballocations.cend();
6530  ++suballocItem, ++i)
6531  {
6532  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6533  {
6534  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6535  }
6536  else
6537  {
6538  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6539  }
6540  }
6541 
6542  PrintDetailedMap_End(json);
6543 }
6544 
6545 #endif // #if VMA_STATS_STRING_ENABLED
6546 
6547 /*
6548 How many suitable free suballocations to analyze before choosing best one.
6549 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
6550  be chosen.
6551 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
6552  suballocations will be analyzed and best one will be chosen.
6553 - Any other value is also acceptable.
6554 */
6555 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
6556 
/*
Tries to find a place for a new allocation of given size, alignment, and type
inside this block. On success fills *pAllocationRequest and returns true.

Search strategy:
1. Early-out if total free space (plus debug margins) cannot possibly fit.
2. Search registered free suballocations: best-fit (binary search for the
   smallest sufficient range, then forward) or worst-fit (from the biggest),
   depending on VMA_BEST_FIT.
3. If canMakeOtherLost, fall back to a brute-force scan over all
   suballocations, allowing existing allocations to be made lost, and keep
   the candidate with the lowest cost (see VmaAllocationRequest::CalcCost()).
*/
bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    // Upper-address allocation is supported only by the linear algorithm.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fulfill the request: Early return.
    if(canMakeOtherLost == false && m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize + 2 * VMA_DEBUG_MARGIN,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else
        {
            // Search starting from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        // Sentinel values: CalcCost() of this request is maximal, so any real
        // candidate found below replaces it.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            // Only free suballocations or allocations that may become lost
            // can serve as a starting point.
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    // Keep the cheapest candidate (fewest bytes lost).
                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        // A candidate was found if the sentinel was overwritten.
        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
6687 
/*
Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
walking forward from pAllocationRequest->item. Returns true on success, with
pAllocationRequest->item left pointing at a free suballocation; returns false
if any targeted allocation refuses to become lost.
*/
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over a free item to reach the next used suballocation.
        // Adjacent free items are always merged, so one step suffices.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge the freed item with its neighbors;
            // it returns the iterator of the resulting free suballocation.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
6719 
/*
Makes lost every allocation in this block that is eligible: CanBecomeLost()
and not used within the last frameInUseCount frames relative to
currentFrameIndex (checked inside MakeLost). Returns the number of
allocations made lost.
*/
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge the freed item with free neighbors
            // and returns the (merged) free item; the loop's ++it then moves
            // past it.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
6737 
6738 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
6739 {
6740  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
6741  it != m_Suballocations.end();
6742  ++it)
6743  {
6744  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
6745  {
6746  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
6747  {
6748  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
6749  return VK_ERROR_VALIDATION_FAILED_EXT;
6750  }
6751  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
6752  {
6753  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
6754  return VK_ERROR_VALIDATION_FAILED_EXT;
6755  }
6756  }
6757  }
6758 
6759  return VK_SUCCESS;
6760 }
6761 
/*
Commits the allocation described by `request` (previously produced by
CreateAllocationRequest, possibly after MakeRequestedAllocationsLost):
converts the free suballocation at request.item into a used one of size
allocSize, splitting leftover space before/after it into new free
suballocations. Statement order matters: the item must be unregistered from
m_FreeSuballocationsBySize before its size is changed.
*/
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Upper-address allocation is supported only by the linear algorithm.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range was consumed; each nonzero padding
    // reintroduces one free range.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
6827 
6828 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
6829 {
6830  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
6831  suballocItem != m_Suballocations.end();
6832  ++suballocItem)
6833  {
6834  VmaSuballocation& suballoc = *suballocItem;
6835  if(suballoc.hAllocation == allocation)
6836  {
6837  FreeSuballocation(suballocItem);
6838  VMA_HEAVY_ASSERT(Validate());
6839  return;
6840  }
6841  }
6842  VMA_ASSERT(0 && "Not found!");
6843 }
6844 
6845 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
6846 {
6847  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
6848  suballocItem != m_Suballocations.end();
6849  ++suballocItem)
6850  {
6851  VmaSuballocation& suballoc = *suballocItem;
6852  if(suballoc.offset == offset)
6853  {
6854  FreeSuballocation(suballocItem);
6855  return;
6856  }
6857  }
6858  VMA_ASSERT(0 && "Not found!");
6859 }
6860 
6861 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
6862 {
6863  VkDeviceSize lastSize = 0;
6864  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
6865  {
6866  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
6867 
6868  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
6869  {
6870  VMA_ASSERT(0);
6871  return false;
6872  }
6873  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6874  {
6875  VMA_ASSERT(0);
6876  return false;
6877  }
6878  if(it->size < lastSize)
6879  {
6880  VMA_ASSERT(0);
6881  return false;
6882  }
6883 
6884  lastSize = it->size;
6885  }
6886  return true;
6887 }
6888 
/*
Checks whether an allocation of allocSize/allocAlignment/allocType can be
placed starting at suballocItem.

- If canMakeOtherLost == false, suballocItem must be a free suballocation
  and the allocation (with alignment, debug margins, and bufferImageGranularity
  adjustments) must fit entirely inside it.
- If canMakeOtherLost == true, the candidate region may additionally span
  following suballocations whose allocations can be made lost; the count of
  allocations that would have to be made lost is returned in
  *itemsToMakeLostCount.

On success returns true and fills *pOffset (final aligned offset), and
*pSumFreeSize / *pSumItemSize, which feed VmaAllocationRequest::CalcCost().
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // Account the starting item: free space, or a lost-able allocation.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // canMakeOtherLost == false: the allocation must fit entirely inside
        // this single free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
7162 
7163 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7164 {
7165  VMA_ASSERT(item != m_Suballocations.end());
7166  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7167 
7168  VmaSuballocationList::iterator nextItem = item;
7169  ++nextItem;
7170  VMA_ASSERT(nextItem != m_Suballocations.end());
7171  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7172 
7173  item->size += nextItem->size;
7174  --m_FreeCount;
7175  m_Suballocations.erase(nextItem);
7176 }
7177 
/*
Marks the given suballocation as free, merges it with adjacent free
neighbors, and keeps m_FreeSuballocationsBySize and the totals in sync.
Returns the iterator of the resulting (possibly merged) free suballocation.
Order matters: neighbors must be unregistered from the by-size vector before
merging changes their sizes.
*/
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // Next item's size is about to change: remove its by-size entry first.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // Previous item absorbs this one; re-register it under its new size.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
7229 
7230 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7231 {
7232  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7233  VMA_ASSERT(item->size > 0);
7234 
7235  // You may want to enable this validation at the beginning or at the end of
7236  // this function, depending on what do you want to check.
7237  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7238 
7239  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7240  {
7241  if(m_FreeSuballocationsBySize.empty())
7242  {
7243  m_FreeSuballocationsBySize.push_back(item);
7244  }
7245  else
7246  {
7247  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7248  }
7249  }
7250 
7251  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7252 }
7253 
7254 
7255 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7256 {
7257  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7258  VMA_ASSERT(item->size > 0);
7259 
7260  // You may want to enable this validation at the beginning or at the end of
7261  // this function, depending on what do you want to check.
7262  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7263 
7264  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7265  {
7266  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7267  m_FreeSuballocationsBySize.data(),
7268  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7269  item,
7270  VmaSuballocationItemSizeLess());
7271  for(size_t index = it - m_FreeSuballocationsBySize.data();
7272  index < m_FreeSuballocationsBySize.size();
7273  ++index)
7274  {
7275  if(m_FreeSuballocationsBySize[index] == item)
7276  {
7277  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7278  return;
7279  }
7280  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7281  }
7282  VMA_ASSERT(0 && "Not found.");
7283  }
7284 
7285  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7286 }
7287 
7289 // class VmaBlockMetadata_Linear
7290 
// Constructs an empty linear metadata object. Both suballocation vectors use
// the allocator's allocation callbacks; the block size is set later in Init().
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
7302 
// Trivial destructor: member vectors release their storage automatically.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
7306 
// Initializes metadata for a block of the given size. Initially the whole
// block is free, so the free-size total equals the block size.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
7312 
/*
Full consistency check of this linear metadata object: vector/mode
invariants, null-item counters, monotonically increasing offsets (with
VMA_DEBUG_MARGIN gaps), agreement between cached offsets/sizes and the
allocation objects, and the free-size total. Returns false on the first
inconsistency. Intended for VMA_HEAVY_ASSERT in debug configurations.
*/
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is non-empty exactly when a second-vector mode is active.
    if(suballocations2nd.empty() != (m_2ndVectorMode == SECOND_VECTOR_EMPTY))
    {
        return false;
    }
    // In ring-buffer mode the 1st vector cannot be empty while 2nd is used.
    if(suballocations1st.empty() && !suballocations2nd.empty() &&
        m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        return false;
    }
    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        // NOTE(review): this indexing assumes m_1stNullItemsBeginCount <
        // suballocations1st.size() - presumably guaranteed by the cleanup
        // logic that pops trailing null items; confirm the invariant.
        if(suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            return false;
        }
        // Null item at the end should be just pop_back().
        if(suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            return false;
        }
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        if(suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            return false;
        }
    }

    // Null-item counters can never exceed the sizes of their vectors.
    if(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount > suballocations1st.size())
    {
        return false;
    }
    if(m_2ndNullItemsCount > suballocations2nd.size())
    {
        return false;
    }

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lower addresses:
    // walk it first, in index order.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // Free items must have a null allocation handle and vice versa.
            if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
            {
                return false;
            }
            // Offsets must be strictly increasing with margin gaps.
            if(suballoc.offset < offset)
            {
                return false;
            }

            if(!currFree)
            {
                // Cached offset/size must match the allocation object.
                if(suballoc.hAllocation->GetOffset() != suballoc.offset)
                {
                    return false;
                }
                if(suballoc.hAllocation->GetSize() != suballoc.size)
                {
                    return false;
                }
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        if(nullItem2ndCount != m_2ndNullItemsCount)
        {
            return false;
        }
    }

    // Leading null items of the 1st vector must all be free with null handle.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE ||
            suballoc.hAllocation != VK_NULL_HANDLE)
        {
            return false;
        }
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    // Walk the remainder of the 1st vector.
    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }
        if(suballoc.offset < offset)
        {
            return false;
        }
        // NOTE(review): i starts at m_1stNullItemsBeginCount, so this
        // condition can never be true - dead code; confirm and remove.
        if(i < m_1stNullItemsBeginCount && !currFree)
        {
            return false;
        }

        if(!currFree)
        {
            if(suballoc.hAllocation->GetOffset() != suballoc.offset)
            {
                return false;
            }
            if(suballoc.hAllocation->GetSize() != suballoc.size)
            {
                return false;
            }
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    if(nullItem1stCount != m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount)
    {
        return false;
    }

    // In double-stack mode the 2nd vector grows downward from the end of the
    // block: walk it in reverse index order (ascending offsets).
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
            {
                return false;
            }
            if(suballoc.offset < offset)
            {
                return false;
            }

            if(!currFree)
            {
                if(suballoc.hAllocation->GetOffset() != suballoc.offset)
                {
                    return false;
                }
                if(suballoc.hAllocation->GetSize() != suballoc.size)
                {
                    return false;
                }
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        if(nullItem2ndCount != m_2ndNullItemsCount)
        {
            return false;
        }
    }

    // Totals must stay consistent with the block size.
    if(offset > GetSize())
    {
        return false;
    }
    if(m_SumFreeSize != GetSize() - sumUsedSize)
    {
        return false;
    }

    return true;
}
7515 
7516 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7517 {
7518  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7519  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7520 }
7521 
/*
Returns the size of the largest contiguous region currently usable by the
linear algorithm for a new allocation, depending on the second-vector mode.
Gaps left inside the vectors by freed allocations are deliberately ignored.
*/
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    // Note: in the RING_BUFFER and DOUBLE_STACK cases, suballocations2nd is
    // non-empty by the mode invariant (see Validate()), so back() is safe.
    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
7585 
7586 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7587 {
7588  const VkDeviceSize size = GetSize();
7589  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7590  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7591  const size_t suballoc1stCount = suballocations1st.size();
7592  const size_t suballoc2ndCount = suballocations2nd.size();
7593 
7594  outInfo.blockCount = 1;
7595  outInfo.allocationCount = (uint32_t)GetAllocationCount();
7596  outInfo.unusedRangeCount = 0;
7597  outInfo.usedBytes = 0;
7598  outInfo.allocationSizeMin = UINT64_MAX;
7599  outInfo.allocationSizeMax = 0;
7600  outInfo.unusedRangeSizeMin = UINT64_MAX;
7601  outInfo.unusedRangeSizeMax = 0;
7602 
7603  VkDeviceSize lastOffset = 0;
7604 
7605  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7606  {
7607  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7608  size_t nextAlloc2ndIndex = 0;
7609  while(lastOffset < freeSpace2ndTo1stEnd)
7610  {
7611  // Find next non-null allocation or move nextAllocIndex to the end.
7612  while(nextAlloc2ndIndex < suballoc2ndCount &&
7613  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7614  {
7615  ++nextAlloc2ndIndex;
7616  }
7617 
7618  // Found non-null allocation.
7619  if(nextAlloc2ndIndex < suballoc2ndCount)
7620  {
7621  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7622 
7623  // 1. Process free space before this allocation.
7624  if(lastOffset < suballoc.offset)
7625  {
7626  // There is free space from lastOffset to suballoc.offset.
7627  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7628  ++outInfo.unusedRangeCount;
7629  outInfo.unusedBytes += unusedRangeSize;
7630  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7631  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7632  }
7633 
7634  // 2. Process this allocation.
7635  // There is allocation with suballoc.offset, suballoc.size.
7636  outInfo.usedBytes += suballoc.size;
7637  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7638  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7639 
7640  // 3. Prepare for next iteration.
7641  lastOffset = suballoc.offset + suballoc.size;
7642  ++nextAlloc2ndIndex;
7643  }
7644  // We are at the end.
7645  else
7646  {
7647  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7648  if(lastOffset < freeSpace2ndTo1stEnd)
7649  {
7650  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7651  ++outInfo.unusedRangeCount;
7652  outInfo.unusedBytes += unusedRangeSize;
7653  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7654  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7655  }
7656 
7657  // End of loop.
7658  lastOffset = freeSpace2ndTo1stEnd;
7659  }
7660  }
7661  }
7662 
7663  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7664  const VkDeviceSize freeSpace1stTo2ndEnd =
7665  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7666  while(lastOffset < freeSpace1stTo2ndEnd)
7667  {
7668  // Find next non-null allocation or move nextAllocIndex to the end.
7669  while(nextAlloc1stIndex < suballoc1stCount &&
7670  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7671  {
7672  ++nextAlloc1stIndex;
7673  }
7674 
7675  // Found non-null allocation.
7676  if(nextAlloc1stIndex < suballoc1stCount)
7677  {
7678  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7679 
7680  // 1. Process free space before this allocation.
7681  if(lastOffset < suballoc.offset)
7682  {
7683  // There is free space from lastOffset to suballoc.offset.
7684  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7685  ++outInfo.unusedRangeCount;
7686  outInfo.unusedBytes += unusedRangeSize;
7687  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7688  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7689  }
7690 
7691  // 2. Process this allocation.
7692  // There is allocation with suballoc.offset, suballoc.size.
7693  outInfo.usedBytes += suballoc.size;
7694  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7695  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7696 
7697  // 3. Prepare for next iteration.
7698  lastOffset = suballoc.offset + suballoc.size;
7699  ++nextAlloc1stIndex;
7700  }
7701  // We are at the end.
7702  else
7703  {
7704  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7705  if(lastOffset < freeSpace1stTo2ndEnd)
7706  {
7707  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7708  ++outInfo.unusedRangeCount;
7709  outInfo.unusedBytes += unusedRangeSize;
7710  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7711  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7712  }
7713 
7714  // End of loop.
7715  lastOffset = freeSpace1stTo2ndEnd;
7716  }
7717  }
7718 
7719  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7720  {
7721  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7722  while(lastOffset < size)
7723  {
7724  // Find next non-null allocation or move nextAllocIndex to the end.
7725  while(nextAlloc2ndIndex != SIZE_MAX &&
7726  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7727  {
7728  --nextAlloc2ndIndex;
7729  }
7730 
7731  // Found non-null allocation.
7732  if(nextAlloc2ndIndex != SIZE_MAX)
7733  {
7734  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7735 
7736  // 1. Process free space before this allocation.
7737  if(lastOffset < suballoc.offset)
7738  {
7739  // There is free space from lastOffset to suballoc.offset.
7740  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7741  ++outInfo.unusedRangeCount;
7742  outInfo.unusedBytes += unusedRangeSize;
7743  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7744  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7745  }
7746 
7747  // 2. Process this allocation.
7748  // There is allocation with suballoc.offset, suballoc.size.
7749  outInfo.usedBytes += suballoc.size;
7750  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7751  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7752 
7753  // 3. Prepare for next iteration.
7754  lastOffset = suballoc.offset + suballoc.size;
7755  --nextAlloc2ndIndex;
7756  }
7757  // We are at the end.
7758  else
7759  {
7760  // There is free space from lastOffset to size.
7761  if(lastOffset < size)
7762  {
7763  const VkDeviceSize unusedRangeSize = size - lastOffset;
7764  ++outInfo.unusedRangeCount;
7765  outInfo.unusedBytes += unusedRangeSize;
7766  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7767  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7768  }
7769 
7770  // End of loop.
7771  lastOffset = size;
7772  }
7773  }
7774  }
7775 
7776  outInfo.unusedBytes = size - outInfo.usedBytes;
7777 }
7778 
7779 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
7780 {
7781  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7782  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7783  const VkDeviceSize size = GetSize();
7784  const size_t suballoc1stCount = suballocations1st.size();
7785  const size_t suballoc2ndCount = suballocations2nd.size();
7786 
7787  inoutStats.size += size;
7788 
7789  VkDeviceSize lastOffset = 0;
7790 
7791  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7792  {
7793  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7794  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
7795  while(lastOffset < freeSpace2ndTo1stEnd)
7796  {
7797  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7798  while(nextAlloc2ndIndex < suballoc2ndCount &&
7799  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7800  {
7801  ++nextAlloc2ndIndex;
7802  }
7803 
7804  // Found non-null allocation.
7805  if(nextAlloc2ndIndex < suballoc2ndCount)
7806  {
7807  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7808 
7809  // 1. Process free space before this allocation.
7810  if(lastOffset < suballoc.offset)
7811  {
7812  // There is free space from lastOffset to suballoc.offset.
7813  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7814  inoutStats.unusedSize += unusedRangeSize;
7815  ++inoutStats.unusedRangeCount;
7816  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7817  }
7818 
7819  // 2. Process this allocation.
7820  // There is allocation with suballoc.offset, suballoc.size.
7821  ++inoutStats.allocationCount;
7822 
7823  // 3. Prepare for next iteration.
7824  lastOffset = suballoc.offset + suballoc.size;
7825  ++nextAlloc2ndIndex;
7826  }
7827  // We are at the end.
7828  else
7829  {
7830  if(lastOffset < freeSpace2ndTo1stEnd)
7831  {
7832  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7833  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7834  inoutStats.unusedSize += unusedRangeSize;
7835  ++inoutStats.unusedRangeCount;
7836  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7837  }
7838 
7839  // End of loop.
7840  lastOffset = freeSpace2ndTo1stEnd;
7841  }
7842  }
7843  }
7844 
7845  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7846  const VkDeviceSize freeSpace1stTo2ndEnd =
7847  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7848  while(lastOffset < freeSpace1stTo2ndEnd)
7849  {
7850  // Find next non-null allocation or move nextAllocIndex to the end.
7851  while(nextAlloc1stIndex < suballoc1stCount &&
7852  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7853  {
7854  ++nextAlloc1stIndex;
7855  }
7856 
7857  // Found non-null allocation.
7858  if(nextAlloc1stIndex < suballoc1stCount)
7859  {
7860  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7861 
7862  // 1. Process free space before this allocation.
7863  if(lastOffset < suballoc.offset)
7864  {
7865  // There is free space from lastOffset to suballoc.offset.
7866  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7867  inoutStats.unusedSize += unusedRangeSize;
7868  ++inoutStats.unusedRangeCount;
7869  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7870  }
7871 
7872  // 2. Process this allocation.
7873  // There is allocation with suballoc.offset, suballoc.size.
7874  ++inoutStats.allocationCount;
7875 
7876  // 3. Prepare for next iteration.
7877  lastOffset = suballoc.offset + suballoc.size;
7878  ++nextAlloc1stIndex;
7879  }
7880  // We are at the end.
7881  else
7882  {
7883  if(lastOffset < freeSpace1stTo2ndEnd)
7884  {
7885  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7886  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7887  inoutStats.unusedSize += unusedRangeSize;
7888  ++inoutStats.unusedRangeCount;
7889  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7890  }
7891 
7892  // End of loop.
7893  lastOffset = freeSpace1stTo2ndEnd;
7894  }
7895  }
7896 
7897  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7898  {
7899  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7900  while(lastOffset < size)
7901  {
7902  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7903  while(nextAlloc2ndIndex != SIZE_MAX &&
7904  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7905  {
7906  --nextAlloc2ndIndex;
7907  }
7908 
7909  // Found non-null allocation.
7910  if(nextAlloc2ndIndex != SIZE_MAX)
7911  {
7912  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7913 
7914  // 1. Process free space before this allocation.
7915  if(lastOffset < suballoc.offset)
7916  {
7917  // There is free space from lastOffset to suballoc.offset.
7918  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7919  inoutStats.unusedSize += unusedRangeSize;
7920  ++inoutStats.unusedRangeCount;
7921  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7922  }
7923 
7924  // 2. Process this allocation.
7925  // There is allocation with suballoc.offset, suballoc.size.
7926  ++inoutStats.allocationCount;
7927 
7928  // 3. Prepare for next iteration.
7929  lastOffset = suballoc.offset + suballoc.size;
7930  --nextAlloc2ndIndex;
7931  }
7932  // We are at the end.
7933  else
7934  {
7935  if(lastOffset < size)
7936  {
7937  // There is free space from lastOffset to size.
7938  const VkDeviceSize unusedRangeSize = size - lastOffset;
7939  inoutStats.unusedSize += unusedRangeSize;
7940  ++inoutStats.unusedRangeCount;
7941  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7942  }
7943 
7944  // End of loop.
7945  lastOffset = size;
7946  }
7947  }
7948  }
7949 }
7950 
7951 #if VMA_STATS_STRING_ENABLED
7952 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
7953 {
7954  const VkDeviceSize size = GetSize();
7955  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7956  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7957  const size_t suballoc1stCount = suballocations1st.size();
7958  const size_t suballoc2ndCount = suballocations2nd.size();
7959 
7960  // FIRST PASS
7961 
7962  size_t unusedRangeCount = 0;
7963  VkDeviceSize usedBytes = 0;
7964 
7965  VkDeviceSize lastOffset = 0;
7966 
7967  size_t alloc2ndCount = 0;
7968  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7969  {
7970  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7971  size_t nextAlloc2ndIndex = 0;
7972  while(lastOffset < freeSpace2ndTo1stEnd)
7973  {
7974  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7975  while(nextAlloc2ndIndex < suballoc2ndCount &&
7976  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7977  {
7978  ++nextAlloc2ndIndex;
7979  }
7980 
7981  // Found non-null allocation.
7982  if(nextAlloc2ndIndex < suballoc2ndCount)
7983  {
7984  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7985 
7986  // 1. Process free space before this allocation.
7987  if(lastOffset < suballoc.offset)
7988  {
7989  // There is free space from lastOffset to suballoc.offset.
7990  ++unusedRangeCount;
7991  }
7992 
7993  // 2. Process this allocation.
7994  // There is allocation with suballoc.offset, suballoc.size.
7995  ++alloc2ndCount;
7996  usedBytes += suballoc.size;
7997 
7998  // 3. Prepare for next iteration.
7999  lastOffset = suballoc.offset + suballoc.size;
8000  ++nextAlloc2ndIndex;
8001  }
8002  // We are at the end.
8003  else
8004  {
8005  if(lastOffset < freeSpace2ndTo1stEnd)
8006  {
8007  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8008  ++unusedRangeCount;
8009  }
8010 
8011  // End of loop.
8012  lastOffset = freeSpace2ndTo1stEnd;
8013  }
8014  }
8015  }
8016 
8017  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8018  size_t alloc1stCount = 0;
8019  const VkDeviceSize freeSpace1stTo2ndEnd =
8020  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8021  while(lastOffset < freeSpace1stTo2ndEnd)
8022  {
8023  // Find next non-null allocation or move nextAllocIndex to the end.
8024  while(nextAlloc1stIndex < suballoc1stCount &&
8025  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8026  {
8027  ++nextAlloc1stIndex;
8028  }
8029 
8030  // Found non-null allocation.
8031  if(nextAlloc1stIndex < suballoc1stCount)
8032  {
8033  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8034 
8035  // 1. Process free space before this allocation.
8036  if(lastOffset < suballoc.offset)
8037  {
8038  // There is free space from lastOffset to suballoc.offset.
8039  ++unusedRangeCount;
8040  }
8041 
8042  // 2. Process this allocation.
8043  // There is allocation with suballoc.offset, suballoc.size.
8044  ++alloc1stCount;
8045  usedBytes += suballoc.size;
8046 
8047  // 3. Prepare for next iteration.
8048  lastOffset = suballoc.offset + suballoc.size;
8049  ++nextAlloc1stIndex;
8050  }
8051  // We are at the end.
8052  else
8053  {
8054  if(lastOffset < size)
8055  {
8056  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8057  ++unusedRangeCount;
8058  }
8059 
8060  // End of loop.
8061  lastOffset = freeSpace1stTo2ndEnd;
8062  }
8063  }
8064 
8065  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8066  {
8067  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8068  while(lastOffset < size)
8069  {
8070  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8071  while(nextAlloc2ndIndex != SIZE_MAX &&
8072  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8073  {
8074  --nextAlloc2ndIndex;
8075  }
8076 
8077  // Found non-null allocation.
8078  if(nextAlloc2ndIndex != SIZE_MAX)
8079  {
8080  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8081 
8082  // 1. Process free space before this allocation.
8083  if(lastOffset < suballoc.offset)
8084  {
8085  // There is free space from lastOffset to suballoc.offset.
8086  ++unusedRangeCount;
8087  }
8088 
8089  // 2. Process this allocation.
8090  // There is allocation with suballoc.offset, suballoc.size.
8091  ++alloc2ndCount;
8092  usedBytes += suballoc.size;
8093 
8094  // 3. Prepare for next iteration.
8095  lastOffset = suballoc.offset + suballoc.size;
8096  --nextAlloc2ndIndex;
8097  }
8098  // We are at the end.
8099  else
8100  {
8101  if(lastOffset < size)
8102  {
8103  // There is free space from lastOffset to size.
8104  ++unusedRangeCount;
8105  }
8106 
8107  // End of loop.
8108  lastOffset = size;
8109  }
8110  }
8111  }
8112 
8113  const VkDeviceSize unusedBytes = size - usedBytes;
8114  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8115 
8116  // SECOND PASS
8117  lastOffset = 0;
8118 
8119  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8120  {
8121  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8122  size_t nextAlloc2ndIndex = 0;
8123  while(lastOffset < freeSpace2ndTo1stEnd)
8124  {
8125  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8126  while(nextAlloc2ndIndex < suballoc2ndCount &&
8127  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8128  {
8129  ++nextAlloc2ndIndex;
8130  }
8131 
8132  // Found non-null allocation.
8133  if(nextAlloc2ndIndex < suballoc2ndCount)
8134  {
8135  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8136 
8137  // 1. Process free space before this allocation.
8138  if(lastOffset < suballoc.offset)
8139  {
8140  // There is free space from lastOffset to suballoc.offset.
8141  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8142  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8143  }
8144 
8145  // 2. Process this allocation.
8146  // There is allocation with suballoc.offset, suballoc.size.
8147  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8148 
8149  // 3. Prepare for next iteration.
8150  lastOffset = suballoc.offset + suballoc.size;
8151  ++nextAlloc2ndIndex;
8152  }
8153  // We are at the end.
8154  else
8155  {
8156  if(lastOffset < freeSpace2ndTo1stEnd)
8157  {
8158  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8159  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8160  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8161  }
8162 
8163  // End of loop.
8164  lastOffset = freeSpace2ndTo1stEnd;
8165  }
8166  }
8167  }
8168 
8169  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8170  while(lastOffset < freeSpace1stTo2ndEnd)
8171  {
8172  // Find next non-null allocation or move nextAllocIndex to the end.
8173  while(nextAlloc1stIndex < suballoc1stCount &&
8174  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8175  {
8176  ++nextAlloc1stIndex;
8177  }
8178 
8179  // Found non-null allocation.
8180  if(nextAlloc1stIndex < suballoc1stCount)
8181  {
8182  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8183 
8184  // 1. Process free space before this allocation.
8185  if(lastOffset < suballoc.offset)
8186  {
8187  // There is free space from lastOffset to suballoc.offset.
8188  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8189  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8190  }
8191 
8192  // 2. Process this allocation.
8193  // There is allocation with suballoc.offset, suballoc.size.
8194  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8195 
8196  // 3. Prepare for next iteration.
8197  lastOffset = suballoc.offset + suballoc.size;
8198  ++nextAlloc1stIndex;
8199  }
8200  // We are at the end.
8201  else
8202  {
8203  if(lastOffset < freeSpace1stTo2ndEnd)
8204  {
8205  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8206  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8207  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8208  }
8209 
8210  // End of loop.
8211  lastOffset = freeSpace1stTo2ndEnd;
8212  }
8213  }
8214 
8215  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8216  {
8217  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8218  while(lastOffset < size)
8219  {
8220  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8221  while(nextAlloc2ndIndex != SIZE_MAX &&
8222  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8223  {
8224  --nextAlloc2ndIndex;
8225  }
8226 
8227  // Found non-null allocation.
8228  if(nextAlloc2ndIndex != SIZE_MAX)
8229  {
8230  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8231 
8232  // 1. Process free space before this allocation.
8233  if(lastOffset < suballoc.offset)
8234  {
8235  // There is free space from lastOffset to suballoc.offset.
8236  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8237  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8238  }
8239 
8240  // 2. Process this allocation.
8241  // There is allocation with suballoc.offset, suballoc.size.
8242  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8243 
8244  // 3. Prepare for next iteration.
8245  lastOffset = suballoc.offset + suballoc.size;
8246  --nextAlloc2ndIndex;
8247  }
8248  // We are at the end.
8249  else
8250  {
8251  if(lastOffset < size)
8252  {
8253  // There is free space from lastOffset to size.
8254  const VkDeviceSize unusedRangeSize = size - lastOffset;
8255  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8256  }
8257 
8258  // End of loop.
8259  lastOffset = size;
8260  }
8261  }
8262  }
8263 
8264  PrintDetailedMap_End(json);
8265 }
8266 #endif // #if VMA_STATS_STRING_ENABLED
8267 
8268 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8269  uint32_t currentFrameIndex,
8270  uint32_t frameInUseCount,
8271  VkDeviceSize bufferImageGranularity,
8272  VkDeviceSize allocSize,
8273  VkDeviceSize allocAlignment,
8274  bool upperAddress,
8275  VmaSuballocationType allocType,
8276  bool canMakeOtherLost,
8277  VmaAllocationRequest* pAllocationRequest)
8278 {
8279  VMA_ASSERT(allocSize > 0);
8280  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8281  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8282  VMA_HEAVY_ASSERT(Validate());
8283 
8284  const VkDeviceSize size = GetSize();
8285  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8286  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8287 
8288  if(upperAddress)
8289  {
8290  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8291  {
8292  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8293  return false;
8294  }
8295 
8296  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8297  if(allocSize > size)
8298  {
8299  return false;
8300  }
8301  VkDeviceSize resultBaseOffset = size - allocSize;
8302  if(!suballocations2nd.empty())
8303  {
8304  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8305  resultBaseOffset = lastSuballoc.offset - allocSize;
8306  if(allocSize > lastSuballoc.offset)
8307  {
8308  return false;
8309  }
8310  }
8311 
8312  // Start from offset equal to end of free space.
8313  VkDeviceSize resultOffset = resultBaseOffset;
8314 
8315  // Apply VMA_DEBUG_MARGIN at the end.
8316  if(VMA_DEBUG_MARGIN > 0)
8317  {
8318  if(resultOffset < VMA_DEBUG_MARGIN)
8319  {
8320  return false;
8321  }
8322  resultOffset -= VMA_DEBUG_MARGIN;
8323  }
8324 
8325  // Apply alignment.
8326  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8327 
8328  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8329  // Make bigger alignment if necessary.
8330  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8331  {
8332  bool bufferImageGranularityConflict = false;
8333  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8334  {
8335  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8336  if(VmaBlocksOnSamePage(nextSuballoc.offset, nextSuballoc.size, resultOffset, bufferImageGranularity))
8337  {
8338  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8339  {
8340  bufferImageGranularityConflict = true;
8341  break;
8342  }
8343  }
8344  else
8345  // Already on previous page.
8346  break;
8347  }
8348  if(bufferImageGranularityConflict)
8349  {
8350  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8351  }
8352  }
8353 
8354  // There is enough free space.
8355  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8356  suballocations1st.back().offset + suballocations1st.back().size :
8357  0;
8358  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8359  {
8360  // Check previous suballocations for BufferImageGranularity conflicts.
8361  // If conflict exists, allocation cannot be made here.
8362  if(bufferImageGranularity > 1)
8363  {
8364  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8365  {
8366  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8367  if(VmaBlocksOnSamePage(resultOffset, allocSize, prevSuballoc.offset, bufferImageGranularity))
8368  {
8369  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8370  {
8371  return false;
8372  }
8373  }
8374  else
8375  {
8376  // Already on next page.
8377  break;
8378  }
8379  }
8380  }
8381 
8382  // All tests passed: Success.
8383  pAllocationRequest->offset = resultOffset;
8384  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8385  pAllocationRequest->sumItemSize = 0;
8386  // pAllocationRequest->item unused.
8387  pAllocationRequest->itemsToMakeLostCount = 0;
8388  return true;
8389  }
8390  }
8391  else // !upperAddress
8392  {
8393  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8394  {
8395  // Try to allocate at the end of 1st vector.
8396 
8397  VkDeviceSize resultBaseOffset = 0;
8398  if(!suballocations1st.empty())
8399  {
8400  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8401  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8402  }
8403 
8404  // Start from offset equal to beginning of free space.
8405  VkDeviceSize resultOffset = resultBaseOffset;
8406 
8407  // Apply VMA_DEBUG_MARGIN at the beginning.
8408  if(VMA_DEBUG_MARGIN > 0)
8409  {
8410  resultOffset += VMA_DEBUG_MARGIN;
8411  }
8412 
8413  // Apply alignment.
8414  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8415 
8416  // Check previous suballocations for BufferImageGranularity conflicts.
8417  // Make bigger alignment if necessary.
8418  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8419  {
8420  bool bufferImageGranularityConflict = false;
8421  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8422  {
8423  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8424  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8425  {
8426  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8427  {
8428  bufferImageGranularityConflict = true;
8429  break;
8430  }
8431  }
8432  else
8433  // Already on previous page.
8434  break;
8435  }
8436  if(bufferImageGranularityConflict)
8437  {
8438  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8439  }
8440  }
8441 
8442  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8443  suballocations2nd.back().offset : size;
8444 
8445  // There is enough free space at the end after alignment.
8446  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8447  {
8448  // Check next suballocations for BufferImageGranularity conflicts.
8449  // If conflict exists, allocation cannot be made here.
8450  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8451  {
8452  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8453  {
8454  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8455  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8456  {
8457  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8458  {
8459  return false;
8460  }
8461  }
8462  else
8463  {
8464  // Already on previous page.
8465  break;
8466  }
8467  }
8468  }
8469 
8470  // All tests passed: Success.
8471  pAllocationRequest->offset = resultOffset;
8472  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8473  pAllocationRequest->sumItemSize = 0;
8474  // pAllocationRequest->item unused.
8475  pAllocationRequest->itemsToMakeLostCount = 0;
8476  return true;
8477  }
8478  }
8479 
8480  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
8481  // beginning of 1st vector as the end of free space.
8482  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8483  {
8484  VMA_ASSERT(!suballocations1st.empty());
8485 
8486  VkDeviceSize resultBaseOffset = 0;
8487  if(!suballocations2nd.empty())
8488  {
8489  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8490  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8491  }
8492 
8493  // Start from offset equal to beginning of free space.
8494  VkDeviceSize resultOffset = resultBaseOffset;
8495 
8496  // Apply VMA_DEBUG_MARGIN at the beginning.
8497  if(VMA_DEBUG_MARGIN > 0)
8498  {
8499  resultOffset += VMA_DEBUG_MARGIN;
8500  }
8501 
8502  // Apply alignment.
8503  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8504 
8505  // Check previous suballocations for BufferImageGranularity conflicts.
8506  // Make bigger alignment if necessary.
8507  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8508  {
8509  bool bufferImageGranularityConflict = false;
8510  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8511  {
8512  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8513  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8514  {
8515  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8516  {
8517  bufferImageGranularityConflict = true;
8518  break;
8519  }
8520  }
8521  else
8522  // Already on previous page.
8523  break;
8524  }
8525  if(bufferImageGranularityConflict)
8526  {
8527  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8528  }
8529  }
8530 
8531  pAllocationRequest->itemsToMakeLostCount = 0;
8532  pAllocationRequest->sumItemSize = 0;
8533  size_t index1st = m_1stNullItemsBeginCount;
8534 
8535  if(canMakeOtherLost)
8536  {
8537  while(index1st < suballocations1st.size() &&
8538  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8539  {
8540  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8541  const VmaSuballocation& suballoc = suballocations1st[index1st];
8542  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8543  {
8544  // No problem.
8545  }
8546  else
8547  {
8548  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8549  if(suballoc.hAllocation->CanBecomeLost() &&
8550  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8551  {
8552  ++pAllocationRequest->itemsToMakeLostCount;
8553  pAllocationRequest->sumItemSize += suballoc.size;
8554  }
8555  else
8556  {
8557  return false;
8558  }
8559  }
8560  ++index1st;
8561  }
8562 
8563  // Check next suballocations for BufferImageGranularity conflicts.
8564  // If conflict exists, we must mark more allocations lost or fail.
8565  if(bufferImageGranularity > 1)
8566  {
8567  while(index1st < suballocations1st.size())
8568  {
8569  const VmaSuballocation& suballoc = suballocations1st[index1st];
8570  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
8571  {
8572  if(suballoc.hAllocation != VK_NULL_HANDLE)
8573  {
8574  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
8575  if(suballoc.hAllocation->CanBecomeLost() &&
8576  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8577  {
8578  ++pAllocationRequest->itemsToMakeLostCount;
8579  pAllocationRequest->sumItemSize += suballoc.size;
8580  }
8581  else
8582  {
8583  return false;
8584  }
8585  }
8586  }
8587  else
8588  {
8589  // Already on next page.
8590  break;
8591  }
8592  ++index1st;
8593  }
8594  }
8595  }
8596 
8597  // There is enough free space at the end after alignment.
8598  if(index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size ||
8599  index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset)
8600  {
8601  // Check next suballocations for BufferImageGranularity conflicts.
8602  // If conflict exists, allocation cannot be made here.
8603  if(bufferImageGranularity > 1)
8604  {
8605  for(size_t nextSuballocIndex = index1st;
8606  nextSuballocIndex < suballocations1st.size();
8607  nextSuballocIndex++)
8608  {
8609  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
8610  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8611  {
8612  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8613  {
8614  return false;
8615  }
8616  }
8617  else
8618  {
8619  // Already on next page.
8620  break;
8621  }
8622  }
8623  }
8624 
8625  // All tests passed: Success.
8626  pAllocationRequest->offset = resultOffset;
8627  pAllocationRequest->sumFreeSize =
8628  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
8629  - resultBaseOffset
8630  - pAllocationRequest->sumItemSize;
8631  // pAllocationRequest->item unused.
8632  return true;
8633  }
8634  }
8635  }
8636 
8637  return false;
8638 }
8639 
// Makes lost the allocations that CreateAllocationRequest() scheduled in
// pAllocationRequest->itemsToMakeLostCount. They are always taken from the
// front (oldest end) of the 1st vector. Returns false if any of them turns
// out to still be in use (used within frameInUseCount frames).
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Nothing was scheduled to be made lost - trivially succeed.
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Lost allocations are only supported in ring-buffer usage, never double stack.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    // Walk the 1st vector starting at the first non-null item, making
    // allocations lost until the requested number has been freed.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a free (null) item and account for it.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                // Allocation was used too recently - cannot be made lost.
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
8684 
8685 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8686 {
8687  uint32_t lostAllocationCount = 0;
8688 
8689  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8690  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8691  {
8692  VmaSuballocation& suballoc = suballocations1st[i];
8693  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8694  suballoc.hAllocation->CanBecomeLost() &&
8695  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8696  {
8697  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8698  suballoc.hAllocation = VK_NULL_HANDLE;
8699  ++m_1stNullItemsMiddleCount;
8700  m_SumFreeSize += suballoc.size;
8701  ++lostAllocationCount;
8702  }
8703  }
8704 
8705  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8706  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8707  {
8708  VmaSuballocation& suballoc = suballocations2nd[i];
8709  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8710  suballoc.hAllocation->CanBecomeLost() &&
8711  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8712  {
8713  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8714  suballoc.hAllocation = VK_NULL_HANDLE;
8715  ++m_2ndNullItemsCount;
8716  ++lostAllocationCount;
8717  }
8718  }
8719 
8720  if(lostAllocationCount)
8721  {
8722  CleanupAfterFree();
8723  }
8724 
8725  return lostAllocationCount;
8726 }
8727 
8728 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
8729 {
8730  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8731  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8732  {
8733  const VmaSuballocation& suballoc = suballocations1st[i];
8734  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8735  {
8736  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8737  {
8738  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8739  return VK_ERROR_VALIDATION_FAILED_EXT;
8740  }
8741  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8742  {
8743  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8744  return VK_ERROR_VALIDATION_FAILED_EXT;
8745  }
8746  }
8747  }
8748 
8749  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8750  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8751  {
8752  const VmaSuballocation& suballoc = suballocations2nd[i];
8753  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8754  {
8755  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8756  {
8757  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8758  return VK_ERROR_VALIDATION_FAILED_EXT;
8759  }
8760  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8761  {
8762  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8763  return VK_ERROR_VALIDATION_FAILED_EXT;
8764  }
8765  }
8766  }
8767 
8768  return VK_SUCCESS;
8769 }
8770 
// Commits an allocation previously granted by CreateAllocationRequest().
// upperAddress=true appends to the 2nd vector used as the upper side of a
// double stack; otherwise the item goes to the end of the 1st vector or, when
// the ring buffer wraps around, to the 2nd vector.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Ring-buffer and double-stack usage are mutually exclusive per block.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Offset matches neither the end of 1st vector nor the ring-buffer tail.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
8840 
8841 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
8842 {
8843  FreeAtOffset(allocation->GetOffset());
8844 }
8845 
// Releases the suballocation that starts exactly at the given offset.
// Checks the cheap O(1) cases first (front of 1st vector, back of either
// vector), then falls back to binary search in the middle of each vector.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Invariant: the 2nd vector is non-empty in these modes
        // (CleanupAfterFree resets the mode to EMPTY when it drains).
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        // The 1st vector is sorted ascending by offset, so binary search works.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark the item free in place; the hole is reclaimed later by
            // CleanupAfterFree() or by compaction.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer keeps the 2nd vector sorted ascending by offset; double
        // stack keeps it sorted descending - pick the matching comparator.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
8934 
8935 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
8936 {
8937  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
8938  const size_t suballocCount = AccessSuballocations1st().size();
8939  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
8940 }
8941 
// Restores internal invariants after one or more suballocations were freed:
// trims null items from the edges of both vectors, optionally compacts the
// 1st vector, and promotes the 2nd vector to 1st when the 1st drains
// (2-part ring buffer rotation).
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Nothing is allocated anymore - reset to pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Too many interior holes: slide live items of 1st vector to the front.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                // Skip null items between live ones.
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Skip leading null items of the promoted vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
9038 
9039 
9041 // class VmaDeviceMemoryBlock
9042 
// Constructs an empty, unusable block. hAllocator is accepted for interface
// symmetry but is not needed here; real initialization happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
9052 
9053 void VmaDeviceMemoryBlock::Init(
9054  VmaAllocator hAllocator,
9055  uint32_t newMemoryTypeIndex,
9056  VkDeviceMemory newMemory,
9057  VkDeviceSize newSize,
9058  uint32_t id,
9059  bool linearAlgorithm)
9060 {
9061  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
9062 
9063  m_MemoryTypeIndex = newMemoryTypeIndex;
9064  m_Id = id;
9065  m_hMemory = newMemory;
9066 
9067  if(linearAlgorithm)
9068  {
9069  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
9070  }
9071  else
9072  {
9073  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
9074  }
9075  m_pMetadata->Init(newSize);
9076 }
9077 
// Returns the block's VkDeviceMemory to Vulkan and deletes its metadata.
// The block must be completely empty: all allocations freed beforehand.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    // Metadata must outlive the FreeVulkanMemory call above (GetSize() is read there).
    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
9091 
9092 bool VmaDeviceMemoryBlock::Validate() const
9093 {
9094  if((m_hMemory == VK_NULL_HANDLE) ||
9095  (m_pMetadata->GetSize() == 0))
9096  {
9097  return false;
9098  }
9099 
9100  return m_pMetadata->Validate();
9101 }
9102 
9103 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
9104 {
9105  void* pData = nullptr;
9106  VkResult res = Map(hAllocator, 1, &pData);
9107  if(res != VK_SUCCESS)
9108  {
9109  return res;
9110  }
9111 
9112  res = m_pMetadata->CheckCorruption(pData);
9113 
9114  Unmap(hAllocator, 1);
9115 
9116  return res;
9117 }
9118 
9119 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
9120 {
9121  if(count == 0)
9122  {
9123  return VK_SUCCESS;
9124  }
9125 
9126  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9127  if(m_MapCount != 0)
9128  {
9129  m_MapCount += count;
9130  VMA_ASSERT(m_pMappedData != VMA_NULL);
9131  if(ppData != VMA_NULL)
9132  {
9133  *ppData = m_pMappedData;
9134  }
9135  return VK_SUCCESS;
9136  }
9137  else
9138  {
9139  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
9140  hAllocator->m_hDevice,
9141  m_hMemory,
9142  0, // offset
9143  VK_WHOLE_SIZE,
9144  0, // flags
9145  &m_pMappedData);
9146  if(result == VK_SUCCESS)
9147  {
9148  if(ppData != VMA_NULL)
9149  {
9150  *ppData = m_pMappedData;
9151  }
9152  m_MapCount = count;
9153  }
9154  return result;
9155  }
9156 }
9157 
9158 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
9159 {
9160  if(count == 0)
9161  {
9162  return;
9163  }
9164 
9165  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9166  if(m_MapCount >= count)
9167  {
9168  m_MapCount -= count;
9169  if(m_MapCount == 0)
9170  {
9171  m_pMappedData = VMA_NULL;
9172  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
9173  }
9174  }
9175  else
9176  {
9177  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
9178  }
9179 }
9180 
9181 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
9182 {
9183  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
9184  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
9185 
9186  void* pData;
9187  VkResult res = Map(hAllocator, 1, &pData);
9188  if(res != VK_SUCCESS)
9189  {
9190  return res;
9191  }
9192 
9193  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
9194  VmaWriteMagicValue(pData, allocOffset + allocSize);
9195 
9196  Unmap(hAllocator, 1);
9197 
9198  return VK_SUCCESS;
9199 }
9200 
9201 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
9202 {
9203  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
9204  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
9205 
9206  void* pData;
9207  VkResult res = Map(hAllocator, 1, &pData);
9208  if(res != VK_SUCCESS)
9209  {
9210  return res;
9211  }
9212 
9213  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
9214  {
9215  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
9216  }
9217  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
9218  {
9219  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
9220  }
9221 
9222  Unmap(hAllocator, 1);
9223 
9224  return VK_SUCCESS;
9225 }
9226 
9227 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
9228  const VmaAllocator hAllocator,
9229  const VmaAllocation hAllocation,
9230  VkBuffer hBuffer)
9231 {
9232  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
9233  hAllocation->GetBlock() == this);
9234  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
9235  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9236  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
9237  hAllocator->m_hDevice,
9238  hBuffer,
9239  m_hMemory,
9240  hAllocation->GetOffset());
9241 }
9242 
9243 VkResult VmaDeviceMemoryBlock::BindImageMemory(
9244  const VmaAllocator hAllocator,
9245  const VmaAllocation hAllocation,
9246  VkImage hImage)
9247 {
9248  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
9249  hAllocation->GetBlock() == this);
9250  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
9251  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9252  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
9253  hAllocator->m_hDevice,
9254  hImage,
9255  m_hMemory,
9256  hAllocation->GetOffset());
9257 }
9258 
9259 static void InitStatInfo(VmaStatInfo& outInfo)
9260 {
9261  memset(&outInfo, 0, sizeof(outInfo));
9262  outInfo.allocationSizeMin = UINT64_MAX;
9263  outInfo.unusedRangeSizeMin = UINT64_MAX;
9264 }
9265 
9266 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
9267 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
9268 {
9269  inoutInfo.blockCount += srcInfo.blockCount;
9270  inoutInfo.allocationCount += srcInfo.allocationCount;
9271  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
9272  inoutInfo.usedBytes += srcInfo.usedBytes;
9273  inoutInfo.unusedBytes += srcInfo.unusedBytes;
9274  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
9275  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
9276  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
9277  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
9278 }
9279 
9280 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
9281 {
9282  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
9283  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
9284  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
9285  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
9286 }
9287 
// Creates the pool's internal block vector from the user-provided create info.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        (createInfo.flags & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) != 0), // linearAlgorithm
    m_Id(0)
{
}
9304 
// Trivial: m_BlockVector destroys its memory blocks in its own destructor.
VmaPool_T::~VmaPool_T()
{
}
9308 
9309 #if VMA_STATS_STRING_ENABLED
9310 
9311 #endif // #if VMA_STATS_STRING_ENABLED
9312 
// Stores configuration only; memory blocks are created lazily on demand
// (or eagerly via CreateMinBlocks()).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool linearAlgorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_LinearAlgorithm(linearAlgorithm),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_HasEmptyBlock(false),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
9338 
9339 VmaBlockVector::~VmaBlockVector()
9340 {
9341  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
9342 
9343  for(size_t i = m_Blocks.size(); i--; )
9344  {
9345  m_Blocks[i]->Destroy(m_hAllocator);
9346  vma_delete(m_hAllocator, m_Blocks[i]);
9347  }
9348 }
9349 
9350 VkResult VmaBlockVector::CreateMinBlocks()
9351 {
9352  for(size_t i = 0; i < m_MinBlockCount; ++i)
9353  {
9354  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
9355  if(res != VK_SUCCESS)
9356  {
9357  return res;
9358  }
9359  }
9360  return VK_SUCCESS;
9361 }
9362 
9363 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
9364 {
9365  pStats->size = 0;
9366  pStats->unusedSize = 0;
9367  pStats->allocationCount = 0;
9368  pStats->unusedRangeCount = 0;
9369  pStats->unusedRangeSizeMax = 0;
9370 
9371  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9372 
9373  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
9374  {
9375  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
9376  VMA_ASSERT(pBlock);
9377  VMA_HEAVY_ASSERT(pBlock->Validate());
9378  pBlock->m_pMetadata->AddPoolStats(*pStats);
9379  }
9380 }
9381 
9382 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
9383 {
9384  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
9385  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
9386  (VMA_DEBUG_MARGIN > 0) &&
9387  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
9388 }
9389 
9390 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
9391 
9392 VkResult VmaBlockVector::Allocate(
9393  VmaPool hCurrentPool,
9394  uint32_t currentFrameIndex,
9395  VkDeviceSize size,
9396  VkDeviceSize alignment,
9397  const VmaAllocationCreateInfo& createInfo,
9398  VmaSuballocationType suballocType,
9399  VmaAllocation* pAllocation)
9400 {
9401  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
9402  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
9403  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
9404  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
9405  const bool canCreateNewBlock =
9406  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
9407  (m_Blocks.size() < m_MaxBlockCount);
9408 
9409  // Upper address can only be used with linear allocator.
9410  if(isUpperAddress && !m_LinearAlgorithm)
9411  {
9412  return VK_ERROR_FEATURE_NOT_PRESENT;
9413  }
9414 
9415  // Early reject: requested allocation size is larger that maximum block size for this block vector.
9416  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
9417  {
9418  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
9419  }
9420 
9421  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9422 
9423  /*
9424  Under certain condition, this whole section can be skipped for optimization, so
9425  we move on directly to trying to allocate with canMakeOtherLost. That's the case
9426  e.g. for custom pools with linear algorithm.
9427  */
9428  if(!canMakeOtherLost || canCreateNewBlock)
9429  {
9430  // 1. Search existing allocations. Try to allocate without making other allocations lost.
9431  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
9432  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
9433  {
9434  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
9435  VMA_ASSERT(pCurrBlock);
9436  VmaAllocationRequest currRequest = {};
9437  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
9438  currentFrameIndex,
9439  m_FrameInUseCount,
9440  m_BufferImageGranularity,
9441  size,
9442  alignment,
9443  isUpperAddress,
9444  suballocType,
9445  false, // canMakeOtherLost
9446  &currRequest))
9447  {
9448  // Allocate from pCurrBlock.
9449  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
9450 
9451  if(mapped)
9452  {
9453  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
9454  if(res != VK_SUCCESS)
9455  {
9456  return res;
9457  }
9458  }
9459 
9460  // We no longer have an empty Allocation.
9461  if(pCurrBlock->m_pMetadata->IsEmpty())
9462  {
9463  m_HasEmptyBlock = false;
9464  }
9465 
9466  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
9467  pCurrBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
9468  (*pAllocation)->InitBlockAllocation(
9469  hCurrentPool,
9470  pCurrBlock,
9471  currRequest.offset,
9472  alignment,
9473  size,
9474  suballocType,
9475  mapped,
9476  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
9477  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
9478  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
9479  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
9480  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
9481  {
9482  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
9483  }
9484  if(IsCorruptionDetectionEnabled())
9485  {
9486  VkResult res = pCurrBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
9487  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
9488  }
9489  return VK_SUCCESS;
9490  }
9491  }
9492 
9493  // 2. Try to create new block.
9494  if(canCreateNewBlock)
9495  {
9496  // Calculate optimal size for new block.
9497  VkDeviceSize newBlockSize = m_PreferredBlockSize;
9498  uint32_t newBlockSizeShift = 0;
9499  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
9500 
9501  // Allocating blocks of other sizes is allowed only in default pools.
9502  // In custom pools block size is fixed.
9503  if(m_IsCustomPool == false)
9504  {
9505  // Allocate 1/8, 1/4, 1/2 as first blocks.
9506  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
9507  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
9508  {
9509  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
9510  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
9511  {
9512  newBlockSize = smallerNewBlockSize;
9513  ++newBlockSizeShift;
9514  }
9515  else
9516  {
9517  break;
9518  }
9519  }
9520  }
9521 
9522  size_t newBlockIndex = 0;
9523  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
9524  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
9525  if(m_IsCustomPool == false)
9526  {
9527  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
9528  {
9529  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
9530  if(smallerNewBlockSize >= size)
9531  {
9532  newBlockSize = smallerNewBlockSize;
9533  ++newBlockSizeShift;
9534  res = CreateBlock(newBlockSize, &newBlockIndex);
9535  }
9536  else
9537  {
9538  break;
9539  }
9540  }
9541  }
9542 
9543  if(res == VK_SUCCESS)
9544  {
9545  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
9546  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
9547 
9548  if(mapped)
9549  {
9550  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
9551  if(res != VK_SUCCESS)
9552  {
9553  return res;
9554  }
9555  }
9556 
9557  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
9558  VmaAllocationRequest allocRequest;
9559  if(pBlock->m_pMetadata->CreateAllocationRequest(
9560  currentFrameIndex,
9561  m_FrameInUseCount,
9562  m_BufferImageGranularity,
9563  size,
9564  alignment,
9565  isUpperAddress,
9566  suballocType,
9567  false, // canMakeOtherLost
9568  &allocRequest))
9569  {
9570  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
9571  pBlock->m_pMetadata->Alloc(allocRequest, suballocType, size, isUpperAddress, *pAllocation);
9572  (*pAllocation)->InitBlockAllocation(
9573  hCurrentPool,
9574  pBlock,
9575  allocRequest.offset,
9576  alignment,
9577  size,
9578  suballocType,
9579  mapped,
9580  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
9581  VMA_HEAVY_ASSERT(pBlock->Validate());
9582  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
9583  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
9584  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
9585  {
9586  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
9587  }
9588  if(IsCorruptionDetectionEnabled())
9589  {
9590  res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, allocRequest.offset, size);
9591  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
9592  }
9593  return VK_SUCCESS;
9594  }
9595  else
9596  {
9597  // Allocation from empty block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
9598  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
9599  }
9600  }
9601  }
9602  }
9603 
9604  // 3. Try to allocate from existing blocks with making other allocations lost.
9605  if(canMakeOtherLost)
9606  {
9607  uint32_t tryIndex = 0;
9608  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
9609  {
9610  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
9611  VmaAllocationRequest bestRequest = {};
9612  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
9613 
9614  // 1. Search existing allocations.
9615  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
9616  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
9617  {
9618  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
9619  VMA_ASSERT(pCurrBlock);
9620  VmaAllocationRequest currRequest = {};
9621  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
9622  currentFrameIndex,
9623  m_FrameInUseCount,
9624  m_BufferImageGranularity,
9625  size,
9626  alignment,
9627  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
9628  suballocType,
9629  canMakeOtherLost,
9630  &currRequest))
9631  {
9632  const VkDeviceSize currRequestCost = currRequest.CalcCost();
9633  if(pBestRequestBlock == VMA_NULL ||
9634  currRequestCost < bestRequestCost)
9635  {
9636  pBestRequestBlock = pCurrBlock;
9637  bestRequest = currRequest;
9638  bestRequestCost = currRequestCost;
9639 
9640  if(bestRequestCost == 0)
9641  {
9642  break;
9643  }
9644  }
9645  }
9646  }
9647 
9648  if(pBestRequestBlock != VMA_NULL)
9649  {
9650  if(mapped)
9651  {
9652  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
9653  if(res != VK_SUCCESS)
9654  {
9655  return res;
9656  }
9657  }
9658 
9659  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
9660  currentFrameIndex,
9661  m_FrameInUseCount,
9662  &bestRequest))
9663  {
9664  // We no longer have an empty Allocation.
9665  if(pBestRequestBlock->m_pMetadata->IsEmpty())
9666  {
9667  m_HasEmptyBlock = false;
9668  }
9669  // Allocate from this pBlock.
9670  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
9671  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
9672  (*pAllocation)->InitBlockAllocation(
9673  hCurrentPool,
9674  pBestRequestBlock,
9675  bestRequest.offset,
9676  alignment,
9677  size,
9678  suballocType,
9679  mapped,
9680  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
9681  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
9682  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
9683  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
9684  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
9685  {
9686  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
9687  }
9688  if(IsCorruptionDetectionEnabled())
9689  {
9690  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
9691  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
9692  }
9693  return VK_SUCCESS;
9694  }
9695  // else: Some allocations must have been touched while we are here. Next try.
9696  }
9697  else
9698  {
9699  // Could not find place in any of the blocks - break outer loop.
9700  break;
9701  }
9702  }
9703  /* Maximum number of tries exceeded - a very unlike event when many other
9704  threads are simultaneously touching allocations making it impossible to make
9705  lost at the same time as we try to allocate. */
9706  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
9707  {
9708  return VK_ERROR_TOO_MANY_OBJECTS;
9709  }
9710  }
9711 
9712  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
9713 }
9714 
9715 void VmaBlockVector::Free(
9716  VmaAllocation hAllocation)
9717 {
9718  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
9719 
9720  // Scope for lock.
9721  {
9722  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9723 
9724  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
9725 
9726  if(IsCorruptionDetectionEnabled())
9727  {
9728  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
9729  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
9730  }
9731 
9732  if(hAllocation->IsPersistentMap())
9733  {
9734  pBlock->Unmap(m_hAllocator, 1);
9735  }
9736 
9737  pBlock->m_pMetadata->Free(hAllocation);
9738  VMA_HEAVY_ASSERT(pBlock->Validate());
9739 
9740  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
9741 
9742  // pBlock became empty after this deallocation.
9743  if(pBlock->m_pMetadata->IsEmpty())
9744  {
9745  // Already has empty Allocation. We don't want to have two, so delete this one.
9746  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
9747  {
9748  pBlockToDelete = pBlock;
9749  Remove(pBlock);
9750  }
9751  // We now have first empty block.
9752  else
9753  {
9754  m_HasEmptyBlock = true;
9755  }
9756  }
9757  // pBlock didn't become empty, but we have another empty block - find and free that one.
9758  // (This is optional, heuristics.)
9759  else if(m_HasEmptyBlock)
9760  {
9761  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
9762  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
9763  {
9764  pBlockToDelete = pLastBlock;
9765  m_Blocks.pop_back();
9766  m_HasEmptyBlock = false;
9767  }
9768  }
9769 
9770  IncrementallySortBlocks();
9771  }
9772 
9773  // Destruction of a free Allocation. Deferred until this point, outside of mutex
9774  // lock, for performance reason.
9775  if(pBlockToDelete != VMA_NULL)
9776  {
9777  VMA_DEBUG_LOG(" Deleted empty allocation");
9778  pBlockToDelete->Destroy(m_hAllocator);
9779  vma_delete(m_hAllocator, pBlockToDelete);
9780  }
9781 }
9782 
9783 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
9784 {
9785  VkDeviceSize result = 0;
9786  for(size_t i = m_Blocks.size(); i--; )
9787  {
9788  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
9789  if(result >= m_PreferredBlockSize)
9790  {
9791  break;
9792  }
9793  }
9794  return result;
9795 }
9796 
9797 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
9798 {
9799  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
9800  {
9801  if(m_Blocks[blockIndex] == pBlock)
9802  {
9803  VmaVectorRemove(m_Blocks, blockIndex);
9804  return;
9805  }
9806  }
9807  VMA_ASSERT(0);
9808 }
9809 
9810 void VmaBlockVector::IncrementallySortBlocks()
9811 {
9812  // Bubble sort only until first swap.
9813  for(size_t i = 1; i < m_Blocks.size(); ++i)
9814  {
9815  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
9816  {
9817  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
9818  return;
9819  }
9820  }
9821 }
9822 
9823 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
9824 {
9825  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
9826  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
9827  allocInfo.allocationSize = blockSize;
9828  VkDeviceMemory mem = VK_NULL_HANDLE;
9829  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
9830  if(res < 0)
9831  {
9832  return res;
9833  }
9834 
9835  // New VkDeviceMemory successfully created.
9836 
9837  // Create new Allocation for it.
9838  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
9839  pBlock->Init(
9840  m_hAllocator,
9841  m_MemoryTypeIndex,
9842  mem,
9843  allocInfo.allocationSize,
9844  m_NextBlockId++,
9845  m_LinearAlgorithm);
9846 
9847  m_Blocks.push_back(pBlock);
9848  if(pNewBlockIndex != VMA_NULL)
9849  {
9850  *pNewBlockIndex = m_Blocks.size() - 1;
9851  }
9852 
9853  return VK_SUCCESS;
9854 }
9855 
9856 #if VMA_STATS_STRING_ENABLED
9857 
// Writes a JSON object describing this block vector. For custom pools the full
// pool configuration is emitted (memory type, block size, block count limits,
// frame-in-use count, linear flag); for default pools only the preferred block
// size. Then a "Blocks" object maps each block id to its detailed metadata map.
// Holds the block vector mutex for the duration; emission order is part of the
// stable output format - do not reorder.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max are printed only when they actually constrain the pool.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_LinearAlgorithm)
        {
            json.WriteString("LinearAlgorithm");
            json.WriteBool(true);
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // Per-block detailed maps, keyed by block id (written as a JSON string).
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
9920 
9921 #endif // #if VMA_STATS_STRING_ENABLED
9922 
9923 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
9924  VmaAllocator hAllocator,
9925  uint32_t currentFrameIndex)
9926 {
9927  if(m_pDefragmentator == VMA_NULL)
9928  {
9929  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
9930  hAllocator,
9931  this,
9932  currentFrameIndex);
9933  }
9934 
9935  return m_pDefragmentator;
9936 }
9937 
// Runs defragmentation for this block vector using the previously created
// defragmentator (no-op when none exists). Accumulates statistics into
// pDefragmentationStats and decreases maxBytesToMove / maxAllocationsToMove
// (in/out budget) by the amounts consumed. Afterwards destroys empty blocks
// above m_MinBlockCount and refreshes m_HasEmptyBlock.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Report the remaining budget back to the caller.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    m_HasEmptyBlock = false;
    // Iterate backwards so removal by index stays valid.
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep at least m_MinBlockCount blocks - remember one is empty.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
9994 
9995 void VmaBlockVector::DestroyDefragmentator()
9996 {
9997  if(m_pDefragmentator != VMA_NULL)
9998  {
9999  vma_delete(m_hAllocator, m_pDefragmentator);
10000  m_pDefragmentator = VMA_NULL;
10001  }
10002 }
10003 
10004 void VmaBlockVector::MakePoolAllocationsLost(
10005  uint32_t currentFrameIndex,
10006  size_t* pLostAllocationCount)
10007 {
10008  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10009  size_t lostAllocationCount = 0;
10010  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10011  {
10012  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10013  VMA_ASSERT(pBlock);
10014  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
10015  }
10016  if(pLostAllocationCount != VMA_NULL)
10017  {
10018  *pLostAllocationCount = lostAllocationCount;
10019  }
10020 }
10021 
10022 VkResult VmaBlockVector::CheckCorruption()
10023 {
10024  if(!IsCorruptionDetectionEnabled())
10025  {
10026  return VK_ERROR_FEATURE_NOT_PRESENT;
10027  }
10028 
10029  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10030  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10031  {
10032  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10033  VMA_ASSERT(pBlock);
10034  VkResult res = pBlock->CheckCorruption(m_hAllocator);
10035  if(res != VK_SUCCESS)
10036  {
10037  return res;
10038  }
10039  }
10040  return VK_SUCCESS;
10041 }
10042 
10043 void VmaBlockVector::AddStats(VmaStats* pStats)
10044 {
10045  const uint32_t memTypeIndex = m_MemoryTypeIndex;
10046  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
10047 
10048  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10049 
10050  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10051  {
10052  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10053  VMA_ASSERT(pBlock);
10054  VMA_HEAVY_ASSERT(pBlock->Validate());
10055  VmaStatInfo allocationStatInfo;
10056  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
10057  VmaAddStatInfo(pStats->total, allocationStatInfo);
10058  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
10059  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
10060  }
10061 }
10062 
10064 // VmaDefragmentator members definition
10065 
// Constructs a defragmentator bound to one block vector and one frame index.
// Allocation and block lists start empty (using the allocator's callbacks);
// AddAllocation() populates the candidate set before Defragment() is called.
// Linear-algorithm (ring-buffer) block vectors are not supported.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation of linear pools is not supported.
    VMA_ASSERT(!pBlockVector->UsesLinearAlgorithm());
}
10080 
10081 VmaDefragmentator::~VmaDefragmentator()
10082 {
10083  for(size_t i = m_Blocks.size(); i--; )
10084  {
10085  vma_delete(m_hAllocator, m_Blocks[i]);
10086  }
10087 }
10088 
10089 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
10090 {
10091  AllocationInfo allocInfo;
10092  allocInfo.m_hAllocation = hAlloc;
10093  allocInfo.m_pChanged = pChanged;
10094  m_Allocations.push_back(allocInfo);
10095 }
10096 
10097 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
10098 {
10099  // It has already been mapped for defragmentation.
10100  if(m_pMappedDataForDefragmentation)
10101  {
10102  *ppMappedData = m_pMappedDataForDefragmentation;
10103  return VK_SUCCESS;
10104  }
10105 
10106  // It is originally mapped.
10107  if(m_pBlock->GetMappedData())
10108  {
10109  *ppMappedData = m_pBlock->GetMappedData();
10110  return VK_SUCCESS;
10111  }
10112 
10113  // Map on first usage.
10114  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
10115  *ppMappedData = m_pMappedDataForDefragmentation;
10116  return res;
10117 }
10118 
10119 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
10120 {
10121  if(m_pMappedDataForDefragmentation != VMA_NULL)
10122  {
10123  m_pBlock->Unmap(hAllocator, 1);
10124  }
10125 }
10126 
// Performs one round of defragmentation: walks candidate allocations starting
// from the most "source" block (last in m_Blocks) and tries to move each one
// into the earliest block / lowest offset that can hold it, copying the bytes
// through mapped pointers and updating metadata. Returns VK_SUCCESS when the
// walk completes, VK_INCOMPLETE when the byte/allocation move budget is
// reached, or a mapping error.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    size_t srcBlockIndex = m_Blocks.size() - 1;
    // SIZE_MAX acts as a sentinel meaning "restart at the last allocation of
    // the current source block"; the while loop below resolves it.
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Map both blocks (reusing existing mappings when possible).
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                if(VMA_DEBUG_MARGIN > 0)
                {
                    // Re-create the debug margin magic values around the moved data.
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Update metadata: allocate at the destination, free at the source.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                // Notify the caller that this allocation moved.
                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the previous allocation, falling back to the previous block.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
10269 
// Main defragmentation entry point. Builds a BlockInfo for every block,
// distributes the registered (non-lost) allocations to their blocks via a
// pointer-sorted binary search, sorts blocks from most "destination" to most
// "source", then runs up to 2 DefragmentRound passes within the given move
// budget. Finally unmaps any block that was mapped only for defragmentation.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            // Binary search works because m_Blocks was just sorted by pointer.
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to a known block.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    // Precompute per-block ordering data used by the sort below.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
10337 
10338 bool VmaDefragmentator::MoveMakesSense(
10339  size_t dstBlockIndex, VkDeviceSize dstOffset,
10340  size_t srcBlockIndex, VkDeviceSize srcOffset)
10341 {
10342  if(dstBlockIndex < srcBlockIndex)
10343  {
10344  return true;
10345  }
10346  if(dstBlockIndex > srcBlockIndex)
10347  {
10348  return false;
10349  }
10350  if(dstOffset < srcOffset)
10351  {
10352  return true;
10353  }
10354  return false;
10355 }
10356 
10358 // VmaRecorder
10359 
10360 #if VMA_RECORDING_ENABLED
10361 
// Constructs an inactive recorder; Init() must be called before recording.
// m_Freq and m_StartCounter hold sentinel values until Init() queries the
// performance counter.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
10370 
10371 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
10372 {
10373  m_UseMutex = useMutex;
10374  m_Flags = settings.flags;
10375 
10376  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
10377  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
10378 
10379  // Open file for writing.
10380  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
10381  if(err != 0)
10382  {
10383  return VK_ERROR_INITIALIZATION_FAILED;
10384  }
10385 
10386  // Write header.
10387  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
10388  fprintf(m_File, "%s\n", "1,3");
10389 
10390  return VK_SUCCESS;
10391 }
10392 
10393 VmaRecorder::~VmaRecorder()
10394 {
10395  if(m_File != VMA_NULL)
10396  {
10397  fclose(m_File);
10398  }
10399 }
10400 
10401 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
10402 {
10403  CallParams callParams;
10404  GetBasicParams(callParams);
10405 
10406  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10407  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
10408  Flush();
10409 }
10410 
10411 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
10412 {
10413  CallParams callParams;
10414  GetBasicParams(callParams);
10415 
10416  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10417  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
10418  Flush();
10419 }
10420 
10421 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
10422 {
10423  CallParams callParams;
10424  GetBasicParams(callParams);
10425 
10426  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10427  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
10428  createInfo.memoryTypeIndex,
10429  createInfo.flags,
10430  createInfo.blockSize,
10431  createInfo.minBlockCount,
10432  createInfo.maxBlockCount,
10433  createInfo.frameInUseCount,
10434  pool);
10435  Flush();
10436 }
10437 
10438 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
10439 {
10440  CallParams callParams;
10441  GetBasicParams(callParams);
10442 
10443  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10444  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
10445  pool);
10446  Flush();
10447 }
10448 
10449 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
10450  const VkMemoryRequirements& vkMemReq,
10451  const VmaAllocationCreateInfo& createInfo,
10452  VmaAllocation allocation)
10453 {
10454  CallParams callParams;
10455  GetBasicParams(callParams);
10456 
10457  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10458  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
10459  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
10460  vkMemReq.size,
10461  vkMemReq.alignment,
10462  vkMemReq.memoryTypeBits,
10463  createInfo.flags,
10464  createInfo.usage,
10465  createInfo.requiredFlags,
10466  createInfo.preferredFlags,
10467  createInfo.memoryTypeBits,
10468  createInfo.pool,
10469  allocation,
10470  userDataStr.GetString());
10471  Flush();
10472 }
10473 
10474 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
10475  const VkMemoryRequirements& vkMemReq,
10476  bool requiresDedicatedAllocation,
10477  bool prefersDedicatedAllocation,
10478  const VmaAllocationCreateInfo& createInfo,
10479  VmaAllocation allocation)
10480 {
10481  CallParams callParams;
10482  GetBasicParams(callParams);
10483 
10484  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10485  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
10486  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
10487  vkMemReq.size,
10488  vkMemReq.alignment,
10489  vkMemReq.memoryTypeBits,
10490  requiresDedicatedAllocation ? 1 : 0,
10491  prefersDedicatedAllocation ? 1 : 0,
10492  createInfo.flags,
10493  createInfo.usage,
10494  createInfo.requiredFlags,
10495  createInfo.preferredFlags,
10496  createInfo.memoryTypeBits,
10497  createInfo.pool,
10498  allocation,
10499  userDataStr.GetString());
10500  Flush();
10501 }
10502 
10503 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
10504  const VkMemoryRequirements& vkMemReq,
10505  bool requiresDedicatedAllocation,
10506  bool prefersDedicatedAllocation,
10507  const VmaAllocationCreateInfo& createInfo,
10508  VmaAllocation allocation)
10509 {
10510  CallParams callParams;
10511  GetBasicParams(callParams);
10512 
10513  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10514  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
10515  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
10516  vkMemReq.size,
10517  vkMemReq.alignment,
10518  vkMemReq.memoryTypeBits,
10519  requiresDedicatedAllocation ? 1 : 0,
10520  prefersDedicatedAllocation ? 1 : 0,
10521  createInfo.flags,
10522  createInfo.usage,
10523  createInfo.requiredFlags,
10524  createInfo.preferredFlags,
10525  createInfo.memoryTypeBits,
10526  createInfo.pool,
10527  allocation,
10528  userDataStr.GetString());
10529  Flush();
10530 }
10531 
10532 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
10533  VmaAllocation allocation)
10534 {
10535  CallParams callParams;
10536  GetBasicParams(callParams);
10537 
10538  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10539  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
10540  allocation);
10541  Flush();
10542 }
10543 
10544 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
10545  VmaAllocation allocation,
10546  const void* pUserData)
10547 {
10548  CallParams callParams;
10549  GetBasicParams(callParams);
10550 
10551  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10552  UserDataString userDataStr(
10553  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
10554  pUserData);
10555  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
10556  allocation,
10557  userDataStr.GetString());
10558  Flush();
10559 }
10560 
10561 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
10562  VmaAllocation allocation)
10563 {
10564  CallParams callParams;
10565  GetBasicParams(callParams);
10566 
10567  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10568  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
10569  allocation);
10570  Flush();
10571 }
10572 
10573 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
10574  VmaAllocation allocation)
10575 {
10576  CallParams callParams;
10577  GetBasicParams(callParams);
10578 
10579  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10580  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
10581  allocation);
10582  Flush();
10583 }
10584 
10585 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
10586  VmaAllocation allocation)
10587 {
10588  CallParams callParams;
10589  GetBasicParams(callParams);
10590 
10591  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10592  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
10593  allocation);
10594  Flush();
10595 }
10596 
10597 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
10598  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
10599 {
10600  CallParams callParams;
10601  GetBasicParams(callParams);
10602 
10603  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10604  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
10605  allocation,
10606  offset,
10607  size);
10608  Flush();
10609 }
10610 
10611 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
10612  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
10613 {
10614  CallParams callParams;
10615  GetBasicParams(callParams);
10616 
10617  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10618  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
10619  allocation,
10620  offset,
10621  size);
10622  Flush();
10623 }
10624 
10625 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
10626  const VkBufferCreateInfo& bufCreateInfo,
10627  const VmaAllocationCreateInfo& allocCreateInfo,
10628  VmaAllocation allocation)
10629 {
10630  CallParams callParams;
10631  GetBasicParams(callParams);
10632 
10633  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10634  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
10635  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
10636  bufCreateInfo.flags,
10637  bufCreateInfo.size,
10638  bufCreateInfo.usage,
10639  bufCreateInfo.sharingMode,
10640  allocCreateInfo.flags,
10641  allocCreateInfo.usage,
10642  allocCreateInfo.requiredFlags,
10643  allocCreateInfo.preferredFlags,
10644  allocCreateInfo.memoryTypeBits,
10645  allocCreateInfo.pool,
10646  allocation,
10647  userDataStr.GetString());
10648  Flush();
10649 }
10650 
10651 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
10652  const VkImageCreateInfo& imageCreateInfo,
10653  const VmaAllocationCreateInfo& allocCreateInfo,
10654  VmaAllocation allocation)
10655 {
10656  CallParams callParams;
10657  GetBasicParams(callParams);
10658 
10659  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10660  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
10661  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
10662  imageCreateInfo.flags,
10663  imageCreateInfo.imageType,
10664  imageCreateInfo.format,
10665  imageCreateInfo.extent.width,
10666  imageCreateInfo.extent.height,
10667  imageCreateInfo.extent.depth,
10668  imageCreateInfo.mipLevels,
10669  imageCreateInfo.arrayLayers,
10670  imageCreateInfo.samples,
10671  imageCreateInfo.tiling,
10672  imageCreateInfo.usage,
10673  imageCreateInfo.sharingMode,
10674  imageCreateInfo.initialLayout,
10675  allocCreateInfo.flags,
10676  allocCreateInfo.usage,
10677  allocCreateInfo.requiredFlags,
10678  allocCreateInfo.preferredFlags,
10679  allocCreateInfo.memoryTypeBits,
10680  allocCreateInfo.pool,
10681  allocation,
10682  userDataStr.GetString());
10683  Flush();
10684 }
10685 
10686 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
10687  VmaAllocation allocation)
10688 {
10689  CallParams callParams;
10690  GetBasicParams(callParams);
10691 
10692  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10693  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
10694  allocation);
10695  Flush();
10696 }
10697 
10698 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
10699  VmaAllocation allocation)
10700 {
10701  CallParams callParams;
10702  GetBasicParams(callParams);
10703 
10704  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10705  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
10706  allocation);
10707  Flush();
10708 }
10709 
10710 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
10711  VmaAllocation allocation)
10712 {
10713  CallParams callParams;
10714  GetBasicParams(callParams);
10715 
10716  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10717  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
10718  allocation);
10719  Flush();
10720 }
10721 
10722 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
10723  VmaAllocation allocation)
10724 {
10725  CallParams callParams;
10726  GetBasicParams(callParams);
10727 
10728  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10729  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
10730  allocation);
10731  Flush();
10732 }
10733 
10734 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
10735  VmaPool pool)
10736 {
10737  CallParams callParams;
10738  GetBasicParams(callParams);
10739 
10740  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10741  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
10742  pool);
10743  Flush();
10744 }
10745 
10746 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
10747 {
10748  if(pUserData != VMA_NULL)
10749  {
10750  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
10751  {
10752  m_Str = (const char*)pUserData;
10753  }
10754  else
10755  {
10756  sprintf_s(m_PtrStr, "%p", pUserData);
10757  m_Str = m_PtrStr;
10758  }
10759  }
10760  else
10761  {
10762  m_Str = "";
10763  }
10764 }
10765 
10766 void VmaRecorder::WriteConfiguration(
10767  const VkPhysicalDeviceProperties& devProps,
10768  const VkPhysicalDeviceMemoryProperties& memProps,
10769  bool dedicatedAllocationExtensionEnabled)
10770 {
10771  fprintf(m_File, "Config,Begin\n");
10772 
10773  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
10774  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
10775  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
10776  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
10777  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
10778  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
10779 
10780  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
10781  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
10782  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
10783 
10784  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
10785  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
10786  {
10787  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
10788  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
10789  }
10790  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
10791  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
10792  {
10793  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
10794  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
10795  }
10796 
10797  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
10798 
10799  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
10800  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
10801  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
10802  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
10803  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
10804  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
10805  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
10806  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
10807  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
10808 
10809  fprintf(m_File, "Config,End\n");
10810 }
10811 
10812 void VmaRecorder::GetBasicParams(CallParams& outParams)
10813 {
10814  outParams.threadId = GetCurrentThreadId();
10815 
10816  LARGE_INTEGER counter;
10817  QueryPerformanceCounter(&counter);
10818  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
10819 }
10820 
10821 void VmaRecorder::Flush()
10822 {
10823  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
10824  {
10825  fflush(m_File);
10826  }
10827 }
10828 
10829 #endif // #if VMA_RECORDING_ENABLED
10830 
10832 // VmaAllocator_T
10833 
10834 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
10835  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
10836  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
10837  m_hDevice(pCreateInfo->device),
10838  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
10839  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
10840  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
10841  m_PreferredLargeHeapBlockSize(0),
10842  m_PhysicalDevice(pCreateInfo->physicalDevice),
10843  m_CurrentFrameIndex(0),
10844  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
10845  m_NextPoolId(0)
10847  ,m_pRecorder(VMA_NULL)
10848 #endif
10849 {
10850  if(VMA_DEBUG_DETECT_CORRUPTION)
10851  {
10852  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
10853  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
10854  }
10855 
10856  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
10857 
10858 #if !(VMA_DEDICATED_ALLOCATION)
10860  {
10861  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
10862  }
10863 #endif
10864 
10865  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
10866  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
10867  memset(&m_MemProps, 0, sizeof(m_MemProps));
10868 
10869  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
10870  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
10871 
10872  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
10873  {
10874  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
10875  }
10876 
10877  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
10878  {
10879  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
10880  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
10881  }
10882 
10883  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
10884 
10885  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
10886  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
10887 
10888  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
10889  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
10890 
10891  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
10892  {
10893  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
10894  {
10895  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
10896  if(limit != VK_WHOLE_SIZE)
10897  {
10898  m_HeapSizeLimit[heapIndex] = limit;
10899  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
10900  {
10901  m_MemProps.memoryHeaps[heapIndex].size = limit;
10902  }
10903  }
10904  }
10905  }
10906 
10907  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
10908  {
10909  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
10910 
10911  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
10912  this,
10913  memTypeIndex,
10914  preferredBlockSize,
10915  0,
10916  SIZE_MAX,
10917  GetBufferImageGranularity(),
10918  pCreateInfo->frameInUseCount,
10919  false, // isCustomPool
10920  false); // linearAlgorithm
10921  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
10922  // becase minBlockCount is 0.
10923  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
10924 
10925  }
10926 }
10927 
10928 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
10929 {
10930  VkResult res = VK_SUCCESS;
10931 
10932  if(pCreateInfo->pRecordSettings != VMA_NULL &&
10933  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
10934  {
10935 #if VMA_RECORDING_ENABLED
10936  m_pRecorder = vma_new(this, VmaRecorder)();
10937  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
10938  if(res != VK_SUCCESS)
10939  {
10940  return res;
10941  }
10942  m_pRecorder->WriteConfiguration(
10943  m_PhysicalDeviceProperties,
10944  m_MemProps,
10945  m_UseKhrDedicatedAllocation);
10946  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
10947 #else
10948  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
10949  return VK_ERROR_FEATURE_NOT_PRESENT;
10950 #endif
10951  }
10952 
10953  return res;
10954 }
10955 
10956 VmaAllocator_T::~VmaAllocator_T()
10957 {
10958 #if VMA_RECORDING_ENABLED
10959  if(m_pRecorder != VMA_NULL)
10960  {
10961  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
10962  vma_delete(this, m_pRecorder);
10963  }
10964 #endif
10965 
10966  VMA_ASSERT(m_Pools.empty());
10967 
10968  for(size_t i = GetMemoryTypeCount(); i--; )
10969  {
10970  vma_delete(this, m_pDedicatedAllocations[i]);
10971  vma_delete(this, m_pBlockVectors[i]);
10972  }
10973 }
10974 
10975 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
10976 {
10977 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
10978  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
10979  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
10980  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
10981  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
10982  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
10983  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
10984  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
10985  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
10986  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
10987  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
10988  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
10989  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
10990  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
10991  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
10992  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
10993  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
10994 #if VMA_DEDICATED_ALLOCATION
10995  if(m_UseKhrDedicatedAllocation)
10996  {
10997  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
10998  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
10999  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
11000  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
11001  }
11002 #endif // #if VMA_DEDICATED_ALLOCATION
11003 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
11004 
11005 #define VMA_COPY_IF_NOT_NULL(funcName) \
11006  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
11007 
11008  if(pVulkanFunctions != VMA_NULL)
11009  {
11010  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
11011  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
11012  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
11013  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
11014  VMA_COPY_IF_NOT_NULL(vkMapMemory);
11015  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
11016  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
11017  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
11018  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
11019  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
11020  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
11021  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
11022  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
11023  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
11024  VMA_COPY_IF_NOT_NULL(vkCreateImage);
11025  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
11026 #if VMA_DEDICATED_ALLOCATION
11027  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
11028  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
11029 #endif
11030  }
11031 
11032 #undef VMA_COPY_IF_NOT_NULL
11033 
11034  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
11035  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
11036  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
11037  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
11038  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
11039  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
11040  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
11041  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
11042  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
11043  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
11044  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
11045  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
11046  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
11047  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
11048  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
11049  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
11050  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
11051  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
11052 #if VMA_DEDICATED_ALLOCATION
11053  if(m_UseKhrDedicatedAllocation)
11054  {
11055  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
11056  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
11057  }
11058 #endif
11059 }
11060 
11061 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
11062 {
11063  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
11064  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
11065  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
11066  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
11067 }
11068 
11069 VkResult VmaAllocator_T::AllocateMemoryOfType(
11070  VkDeviceSize size,
11071  VkDeviceSize alignment,
11072  bool dedicatedAllocation,
11073  VkBuffer dedicatedBuffer,
11074  VkImage dedicatedImage,
11075  const VmaAllocationCreateInfo& createInfo,
11076  uint32_t memTypeIndex,
11077  VmaSuballocationType suballocType,
11078  VmaAllocation* pAllocation)
11079 {
11080  VMA_ASSERT(pAllocation != VMA_NULL);
11081  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
11082 
11083  VmaAllocationCreateInfo finalCreateInfo = createInfo;
11084 
11085  // If memory type is not HOST_VISIBLE, disable MAPPED.
11086  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
11087  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
11088  {
11089  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
11090  }
11091 
11092  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
11093  VMA_ASSERT(blockVector);
11094 
11095  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
11096  bool preferDedicatedMemory =
11097  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
11098  dedicatedAllocation ||
11099  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
11100  size > preferredBlockSize / 2;
11101 
11102  if(preferDedicatedMemory &&
11103  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
11104  finalCreateInfo.pool == VK_NULL_HANDLE)
11105  {
11107  }
11108 
11109  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
11110  {
11111  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11112  {
11113  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11114  }
11115  else
11116  {
11117  return AllocateDedicatedMemory(
11118  size,
11119  suballocType,
11120  memTypeIndex,
11121  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
11122  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
11123  finalCreateInfo.pUserData,
11124  dedicatedBuffer,
11125  dedicatedImage,
11126  pAllocation);
11127  }
11128  }
11129  else
11130  {
11131  VkResult res = blockVector->Allocate(
11132  VK_NULL_HANDLE, // hCurrentPool
11133  m_CurrentFrameIndex.load(),
11134  size,
11135  alignment,
11136  finalCreateInfo,
11137  suballocType,
11138  pAllocation);
11139  if(res == VK_SUCCESS)
11140  {
11141  return res;
11142  }
11143 
11144  // 5. Try dedicated memory.
11145  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11146  {
11147  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11148  }
11149  else
11150  {
11151  res = AllocateDedicatedMemory(
11152  size,
11153  suballocType,
11154  memTypeIndex,
11155  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
11156  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
11157  finalCreateInfo.pUserData,
11158  dedicatedBuffer,
11159  dedicatedImage,
11160  pAllocation);
11161  if(res == VK_SUCCESS)
11162  {
11163  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
11164  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
11165  return VK_SUCCESS;
11166  }
11167  else
11168  {
11169  // Everything failed: Return error code.
11170  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
11171  return res;
11172  }
11173  }
11174  }
11175 }
11176 
11177 VkResult VmaAllocator_T::AllocateDedicatedMemory(
11178  VkDeviceSize size,
11179  VmaSuballocationType suballocType,
11180  uint32_t memTypeIndex,
11181  bool map,
11182  bool isUserDataString,
11183  void* pUserData,
11184  VkBuffer dedicatedBuffer,
11185  VkImage dedicatedImage,
11186  VmaAllocation* pAllocation)
11187 {
11188  VMA_ASSERT(pAllocation);
11189 
11190  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11191  allocInfo.memoryTypeIndex = memTypeIndex;
11192  allocInfo.allocationSize = size;
11193 
11194 #if VMA_DEDICATED_ALLOCATION
11195  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
11196  if(m_UseKhrDedicatedAllocation)
11197  {
11198  if(dedicatedBuffer != VK_NULL_HANDLE)
11199  {
11200  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
11201  dedicatedAllocInfo.buffer = dedicatedBuffer;
11202  allocInfo.pNext = &dedicatedAllocInfo;
11203  }
11204  else if(dedicatedImage != VK_NULL_HANDLE)
11205  {
11206  dedicatedAllocInfo.image = dedicatedImage;
11207  allocInfo.pNext = &dedicatedAllocInfo;
11208  }
11209  }
11210 #endif // #if VMA_DEDICATED_ALLOCATION
11211 
11212  // Allocate VkDeviceMemory.
11213  VkDeviceMemory hMemory = VK_NULL_HANDLE;
11214  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
11215  if(res < 0)
11216  {
11217  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
11218  return res;
11219  }
11220 
11221  void* pMappedData = VMA_NULL;
11222  if(map)
11223  {
11224  res = (*m_VulkanFunctions.vkMapMemory)(
11225  m_hDevice,
11226  hMemory,
11227  0,
11228  VK_WHOLE_SIZE,
11229  0,
11230  &pMappedData);
11231  if(res < 0)
11232  {
11233  VMA_DEBUG_LOG(" vkMapMemory FAILED");
11234  FreeVulkanMemory(memTypeIndex, size, hMemory);
11235  return res;
11236  }
11237  }
11238 
11239  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
11240  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
11241  (*pAllocation)->SetUserData(this, pUserData);
11242  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11243  {
11244  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11245  }
11246 
11247  // Register it in m_pDedicatedAllocations.
11248  {
11249  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
11250  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
11251  VMA_ASSERT(pDedicatedAllocations);
11252  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
11253  }
11254 
11255  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
11256 
11257  return VK_SUCCESS;
11258 }
11259 
11260 void VmaAllocator_T::GetBufferMemoryRequirements(
11261  VkBuffer hBuffer,
11262  VkMemoryRequirements& memReq,
11263  bool& requiresDedicatedAllocation,
11264  bool& prefersDedicatedAllocation) const
11265 {
11266 #if VMA_DEDICATED_ALLOCATION
11267  if(m_UseKhrDedicatedAllocation)
11268  {
11269  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
11270  memReqInfo.buffer = hBuffer;
11271 
11272  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
11273 
11274  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
11275  memReq2.pNext = &memDedicatedReq;
11276 
11277  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
11278 
11279  memReq = memReq2.memoryRequirements;
11280  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
11281  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
11282  }
11283  else
11284 #endif // #if VMA_DEDICATED_ALLOCATION
11285  {
11286  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
11287  requiresDedicatedAllocation = false;
11288  prefersDedicatedAllocation = false;
11289  }
11290 }
11291 
11292 void VmaAllocator_T::GetImageMemoryRequirements(
11293  VkImage hImage,
11294  VkMemoryRequirements& memReq,
11295  bool& requiresDedicatedAllocation,
11296  bool& prefersDedicatedAllocation) const
11297 {
11298 #if VMA_DEDICATED_ALLOCATION
11299  if(m_UseKhrDedicatedAllocation)
11300  {
11301  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
11302  memReqInfo.image = hImage;
11303 
11304  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
11305 
11306  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
11307  memReq2.pNext = &memDedicatedReq;
11308 
11309  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
11310 
11311  memReq = memReq2.memoryRequirements;
11312  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
11313  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
11314  }
11315  else
11316 #endif // #if VMA_DEDICATED_ALLOCATION
11317  {
11318  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
11319  requiresDedicatedAllocation = false;
11320  prefersDedicatedAllocation = false;
11321  }
11322 }
11323 
11324 VkResult VmaAllocator_T::AllocateMemory(
11325  const VkMemoryRequirements& vkMemReq,
11326  bool requiresDedicatedAllocation,
11327  bool prefersDedicatedAllocation,
11328  VkBuffer dedicatedBuffer,
11329  VkImage dedicatedImage,
11330  const VmaAllocationCreateInfo& createInfo,
11331  VmaSuballocationType suballocType,
11332  VmaAllocation* pAllocation)
11333 {
11334  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
11335  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11336  {
11337  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
11338  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11339  }
11340  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
11342  {
11343  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
11344  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11345  }
11346  if(requiresDedicatedAllocation)
11347  {
11348  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11349  {
11350  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
11351  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11352  }
11353  if(createInfo.pool != VK_NULL_HANDLE)
11354  {
11355  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
11356  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11357  }
11358  }
11359  if((createInfo.pool != VK_NULL_HANDLE) &&
11360  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
11361  {
11362  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
11363  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11364  }
11365 
11366  if(createInfo.pool != VK_NULL_HANDLE)
11367  {
11368  const VkDeviceSize alignmentForPool = VMA_MAX(
11369  vkMemReq.alignment,
11370  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
11371  return createInfo.pool->m_BlockVector.Allocate(
11372  createInfo.pool,
11373  m_CurrentFrameIndex.load(),
11374  vkMemReq.size,
11375  alignmentForPool,
11376  createInfo,
11377  suballocType,
11378  pAllocation);
11379  }
11380  else
11381  {
11382  // Bit mask of memory Vulkan types acceptable for this allocation.
11383  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
11384  uint32_t memTypeIndex = UINT32_MAX;
11385  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
11386  if(res == VK_SUCCESS)
11387  {
11388  VkDeviceSize alignmentForMemType = VMA_MAX(
11389  vkMemReq.alignment,
11390  GetMemoryTypeMinAlignment(memTypeIndex));
11391 
11392  res = AllocateMemoryOfType(
11393  vkMemReq.size,
11394  alignmentForMemType,
11395  requiresDedicatedAllocation || prefersDedicatedAllocation,
11396  dedicatedBuffer,
11397  dedicatedImage,
11398  createInfo,
11399  memTypeIndex,
11400  suballocType,
11401  pAllocation);
11402  // Succeeded on first try.
11403  if(res == VK_SUCCESS)
11404  {
11405  return res;
11406  }
11407  // Allocation from this memory type failed. Try other compatible memory types.
11408  else
11409  {
11410  for(;;)
11411  {
11412  // Remove old memTypeIndex from list of possibilities.
11413  memoryTypeBits &= ~(1u << memTypeIndex);
11414  // Find alternative memTypeIndex.
11415  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
11416  if(res == VK_SUCCESS)
11417  {
11418  alignmentForMemType = VMA_MAX(
11419  vkMemReq.alignment,
11420  GetMemoryTypeMinAlignment(memTypeIndex));
11421 
11422  res = AllocateMemoryOfType(
11423  vkMemReq.size,
11424  alignmentForMemType,
11425  requiresDedicatedAllocation || prefersDedicatedAllocation,
11426  dedicatedBuffer,
11427  dedicatedImage,
11428  createInfo,
11429  memTypeIndex,
11430  suballocType,
11431  pAllocation);
11432  // Allocation from this alternative memory type succeeded.
11433  if(res == VK_SUCCESS)
11434  {
11435  return res;
11436  }
11437  // else: Allocation from this memory type failed. Try next one - next loop iteration.
11438  }
11439  // No other matching memory type index could be found.
11440  else
11441  {
11442  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
11443  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11444  }
11445  }
11446  }
11447  }
11448  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
11449  else
11450  return res;
11451  }
11452 }
11453 
void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation);

    // Memory is released only if the allocation is not lost: a lost
    // allocation (CanBecomeLost() == true and last-use frame index ==
    // VMA_FRAME_INDEX_LOST) no longer owns memory, so only its handle object
    // is destroyed below.
    if(allocation->CanBecomeLost() == false ||
        allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    {
        // Optionally overwrite the memory with a debug pattern before freeing.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
        }

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // Suballocation inside a block: return it to the owning block
                // vector - the custom pool's vector when the allocation came
                // from a pool, otherwise the default vector of its memory type.
                VmaBlockVector* pBlockVector = VMA_NULL;
                VmaPool hPool = allocation->GetPool();
                if(hPool != VK_NULL_HANDLE)
                {
                    pBlockVector = &hPool->m_BlockVector;
                }
                else
                {
                    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                    pBlockVector = m_pBlockVectors[memTypeIndex];
                }
                pBlockVector->Free(allocation);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            FreeDedicatedMemory(allocation);
            break;
        default:
            VMA_ASSERT(0);
        }
    }

    // Clear user data before destroying the allocation object itself.
    allocation->SetUserData(this, VMA_NULL);
    vma_delete(this, allocation);
}
11495 
11496 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
11497 {
11498  // Initialize.
11499  InitStatInfo(pStats->total);
11500  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
11501  InitStatInfo(pStats->memoryType[i]);
11502  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
11503  InitStatInfo(pStats->memoryHeap[i]);
11504 
11505  // Process default pools.
11506  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11507  {
11508  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
11509  VMA_ASSERT(pBlockVector);
11510  pBlockVector->AddStats(pStats);
11511  }
11512 
11513  // Process custom pools.
11514  {
11515  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11516  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
11517  {
11518  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
11519  }
11520  }
11521 
11522  // Process dedicated allocations.
11523  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11524  {
11525  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
11526  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
11527  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
11528  VMA_ASSERT(pDedicatedAllocVector);
11529  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
11530  {
11531  VmaStatInfo allocationStatInfo;
11532  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
11533  VmaAddStatInfo(pStats->total, allocationStatInfo);
11534  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11535  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11536  }
11537  }
11538 
11539  // Postprocess.
11540  VmaPostprocessCalcStatInfo(pStats->total);
11541  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
11542  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
11543  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
11544  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
11545 }
11546 
// AMD's PCI vendor ID: 4098 == 0x1002. Presumably compared against
// VkPhysicalDeviceProperties::vendorID elsewhere - confirm at use sites.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
11548 
11549 VkResult VmaAllocator_T::Defragment(
11550  VmaAllocation* pAllocations,
11551  size_t allocationCount,
11552  VkBool32* pAllocationsChanged,
11553  const VmaDefragmentationInfo* pDefragmentationInfo,
11554  VmaDefragmentationStats* pDefragmentationStats)
11555 {
11556  if(pAllocationsChanged != VMA_NULL)
11557  {
11558  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
11559  }
11560  if(pDefragmentationStats != VMA_NULL)
11561  {
11562  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
11563  }
11564 
11565  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
11566 
11567  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
11568 
11569  const size_t poolCount = m_Pools.size();
11570 
11571  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
11572  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11573  {
11574  VmaAllocation hAlloc = pAllocations[allocIndex];
11575  VMA_ASSERT(hAlloc);
11576  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
11577  // DedicatedAlloc cannot be defragmented.
11578  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11579  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
11580  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
11581  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
11582  // Lost allocation cannot be defragmented.
11583  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
11584  {
11585  VmaBlockVector* pAllocBlockVector = VMA_NULL;
11586 
11587  const VmaPool hAllocPool = hAlloc->GetPool();
11588  // This allocation belongs to custom pool.
11589  if(hAllocPool != VK_NULL_HANDLE)
11590  {
11591  // Pools with linear algorithm are not defragmented.
11592  if(!hAllocPool->m_BlockVector.UsesLinearAlgorithm())
11593  {
11594  pAllocBlockVector = &hAllocPool->m_BlockVector;
11595  }
11596  }
11597  // This allocation belongs to general pool.
11598  else
11599  {
11600  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
11601  }
11602 
11603  if(pAllocBlockVector != VMA_NULL)
11604  {
11605  VmaDefragmentator* const pDefragmentator =
11606  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
11607  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
11608  &pAllocationsChanged[allocIndex] : VMA_NULL;
11609  pDefragmentator->AddAllocation(hAlloc, pChanged);
11610  }
11611  }
11612  }
11613 
11614  VkResult result = VK_SUCCESS;
11615 
11616  // ======== Main processing.
11617 
11618  VkDeviceSize maxBytesToMove = SIZE_MAX;
11619  uint32_t maxAllocationsToMove = UINT32_MAX;
11620  if(pDefragmentationInfo != VMA_NULL)
11621  {
11622  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
11623  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
11624  }
11625 
11626  // Process standard memory.
11627  for(uint32_t memTypeIndex = 0;
11628  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
11629  ++memTypeIndex)
11630  {
11631  // Only HOST_VISIBLE memory types can be defragmented.
11632  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
11633  {
11634  result = m_pBlockVectors[memTypeIndex]->Defragment(
11635  pDefragmentationStats,
11636  maxBytesToMove,
11637  maxAllocationsToMove);
11638  }
11639  }
11640 
11641  // Process custom pools.
11642  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
11643  {
11644  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
11645  pDefragmentationStats,
11646  maxBytesToMove,
11647  maxAllocationsToMove);
11648  }
11649 
11650  // ======== Destroy defragmentators.
11651 
11652  // Process custom pools.
11653  for(size_t poolIndex = poolCount; poolIndex--; )
11654  {
11655  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
11656  }
11657 
11658  // Process standard memory.
11659  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
11660  {
11661  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
11662  {
11663  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
11664  }
11665  }
11666 
11667  return result;
11668 }
11669 
// Fills pAllocationInfo with the allocation's current parameters. For
// allocations that can become lost, also atomically "touches" the allocation
// (advances its last-use frame index to the current frame).
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report empty parameters. Size and user
                // data remain meaningful and are still returned.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched in the current frame: report real parameters.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame via CAS; loop
                // until it sticks or the allocation is observed lost/current.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // When stats strings are enabled, keep the last-use frame index
        // current even for allocations that cannot become lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
11741 
// Returns false if the allocation is lost, true otherwise. As a side effect,
// advances the allocation's last-use frame index to the current frame
// (presumably protecting it from becoming lost this frame - see the
// lost-allocation machinery).
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // CAS loop: retry until the index is advanced to the current
                // frame or the allocation is observed lost/current.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Keep last-use frame index current even for allocations that cannot
        // become lost (used for statistics reporting).
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
11793 
11794 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
11795 {
11796  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
11797 
11798  const bool isLinearAlgorithm = (pCreateInfo->flags & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) != 0;
11799 
11800  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
11801 
11802  if(newCreateInfo.maxBlockCount == 0)
11803  {
11804  newCreateInfo.maxBlockCount = isLinearAlgorithm ? 1 : SIZE_MAX;
11805  }
11806  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount ||
11807  isLinearAlgorithm && newCreateInfo.maxBlockCount > 1)
11808  {
11809  return VK_ERROR_INITIALIZATION_FAILED;
11810  }
11811  if(newCreateInfo.blockSize == 0)
11812  {
11813  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
11814  }
11815 
11816  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
11817 
11818  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
11819  if(res != VK_SUCCESS)
11820  {
11821  vma_delete(this, *pPool);
11822  *pPool = VMA_NULL;
11823  return res;
11824  }
11825 
11826  // Add to m_Pools.
11827  {
11828  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11829  (*pPool)->SetId(m_NextPoolId++);
11830  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
11831  }
11832 
11833  return VK_SUCCESS;
11834 }
11835 
11836 void VmaAllocator_T::DestroyPool(VmaPool pool)
11837 {
11838  // Remove from m_Pools.
11839  {
11840  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11841  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
11842  VMA_ASSERT(success && "Pool not found in Allocator.");
11843  }
11844 
11845  vma_delete(this, pool);
11846 }
11847 
// Thin wrapper: pool statistics are gathered by the pool's block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
11852 
// Atomically publishes the new frame index. It is read by the
// lost-allocation logic (GetAllocationInfo/TouchAllocation/Defragment).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
11857 
// Marks eligible allocations in the given pool as lost, using the current
// frame index to decide eligibility. Optionally reports how many were lost
// via pLostAllocationCount.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
11866 
// Thin wrapper: corruption detection is implemented by the block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
11871 
11872 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
11873 {
11874  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
11875 
11876  // Process default pools.
11877  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11878  {
11879  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
11880  {
11881  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
11882  VMA_ASSERT(pBlockVector);
11883  VkResult localRes = pBlockVector->CheckCorruption();
11884  switch(localRes)
11885  {
11886  case VK_ERROR_FEATURE_NOT_PRESENT:
11887  break;
11888  case VK_SUCCESS:
11889  finalRes = VK_SUCCESS;
11890  break;
11891  default:
11892  return localRes;
11893  }
11894  }
11895  }
11896 
11897  // Process custom pools.
11898  {
11899  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11900  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
11901  {
11902  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
11903  {
11904  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
11905  switch(localRes)
11906  {
11907  case VK_ERROR_FEATURE_NOT_PRESENT:
11908  break;
11909  case VK_SUCCESS:
11910  finalRes = VK_SUCCESS;
11911  break;
11912  default:
11913  return localRes;
11914  }
11915  }
11916  }
11917  }
11918 
11919  return finalRes;
11920 }
11921 
// Creates a dummy allocation that is already in the "lost" state (frame
// index VMA_FRAME_INDEX_LOST). The second constructor argument is false -
// presumably the userDataString flag; confirm with the VmaAllocation_T ctor.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
11927 
// Calls vkAllocateMemory, enforcing the optional user-imposed per-heap size
// limit and firing the user's device-memory allocation callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit" for this heap.
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // Budgeted heap: check the remaining budget and charge it on success,
        // all under the limit mutex so the accounting stays consistent.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Allocation would exceed the user-imposed heap limit.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Notify the user's callback about the successful allocation.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
11961 
11962 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
11963 {
11964  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
11965  {
11966  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
11967  }
11968 
11969  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
11970 
11971  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
11972  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
11973  {
11974  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
11975  m_HeapSizeLimit[heapIndex] += size;
11976  }
11977 }
11978 
// Maps the allocation's memory and returns the pointer via ppData.
// Returns VK_ERROR_MEMORY_MAP_FAILED for allocations that can become lost -
// their memory could be taken away at any frame.
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // Map the whole owning block, then offset the returned pointer to
            // this suballocation.
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                // Track the map on the allocation itself (paired with
                // BlockAllocUnmap in Unmap).
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
12007 
12008 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
12009 {
12010  switch(hAllocation->GetType())
12011  {
12012  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12013  {
12014  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12015  hAllocation->BlockAllocUnmap();
12016  pBlock->Unmap(this, 1);
12017  }
12018  break;
12019  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12020  hAllocation->DedicatedAllocUnmap(this);
12021  break;
12022  default:
12023  VMA_ASSERT(0);
12024  }
12025 }
12026 
12027 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
12028 {
12029  VkResult res = VK_SUCCESS;
12030  switch(hAllocation->GetType())
12031  {
12032  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12033  res = GetVulkanFunctions().vkBindBufferMemory(
12034  m_hDevice,
12035  hBuffer,
12036  hAllocation->GetMemory(),
12037  0); //memoryOffset
12038  break;
12039  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12040  {
12041  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12042  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
12043  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
12044  break;
12045  }
12046  default:
12047  VMA_ASSERT(0);
12048  }
12049  return res;
12050 }
12051 
12052 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
12053 {
12054  VkResult res = VK_SUCCESS;
12055  switch(hAllocation->GetType())
12056  {
12057  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12058  res = GetVulkanFunctions().vkBindImageMemory(
12059  m_hDevice,
12060  hImage,
12061  hAllocation->GetMemory(),
12062  0); //memoryOffset
12063  break;
12064  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12065  {
12066  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12067  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
12068  res = pBlock->BindImageMemory(this, hAllocation, hImage);
12069  break;
12070  }
12071  default:
12072  VMA_ASSERT(0);
12073  }
12074  return res;
12075 }
12076 
// Flushes or invalidates (per op) the given byte range of the allocation.
// No-op for coherent memory types or when size == 0. Ranges are expanded to
// nonCoherentAtomSize alignment as required by the Vulkan spec.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align the start down; clamp the aligned-up size so the range
            // never exceeds the allocation's end.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // 1. Still within this allocation.
                memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
                if(size == VK_WHOLE_SIZE)
                {
                    size = allocationSize - offset;
                }
                else
                {
                    VMA_ASSERT(offset + size <= allocationSize);
                }
                memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

                // 2. Adjust to whole block.
                // Translate the allocation-relative range into a block-relative
                // one and clamp it to the block's end.
                const VkDeviceSize allocationOffset = hAllocation->GetOffset();
                VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
                const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
                memRange.offset += allocationOffset;
                memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

                break;
            }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
12152 
// Releases a dedicated (non-block) allocation: unregisters it from the
// per-memory-type dedicated list, unmaps it if persistently mapped, and
// frees its VkDeviceMemory.
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Remove from the sorted dedicated-allocations vector under its mutex.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    // Unmap persistently mapped memory before freeing it.
    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }

    // Frees the VkDeviceMemory, updates the heap budget and fires callbacks.
    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
12177 
12178 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
12179 {
12180  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
12181  !hAllocation->CanBecomeLost() &&
12182  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12183  {
12184  void* pData = VMA_NULL;
12185  VkResult res = Map(hAllocation, &pData);
12186  if(res == VK_SUCCESS)
12187  {
12188  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
12189  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
12190  Unmap(hAllocation);
12191  }
12192  else
12193  {
12194  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
12195  }
12196  }
12197 }
12198 
12199 #if VMA_STATS_STRING_ENABLED
12200 
// Writes up to three JSON sections into the stats string:
// "DedicatedAllocations", "DefaultPools" and "Pools" (custom pools). Each
// section is opened lazily, only when it has content.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Key is "Type <memTypeIndex>", value is an array of allocations.
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default pools: one entry per non-empty default block vector.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Key is the pool's numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
12286 
12287 #endif // #if VMA_STATS_STRING_ENABLED
12288 
12290 // Public interface
12291 
12292 VkResult vmaCreateAllocator(
12293  const VmaAllocatorCreateInfo* pCreateInfo,
12294  VmaAllocator* pAllocator)
12295 {
12296  VMA_ASSERT(pCreateInfo && pAllocator);
12297  VMA_DEBUG_LOG("vmaCreateAllocator");
12298  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
12299  return (*pAllocator)->Init(pCreateInfo);
12300 }
12301 
12302 void vmaDestroyAllocator(
12303  VmaAllocator allocator)
12304 {
12305  if(allocator != VK_NULL_HANDLE)
12306  {
12307  VMA_DEBUG_LOG("vmaDestroyAllocator");
12308  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
12309  vma_delete(&allocationCallbacks, allocator);
12310  }
12311 }
12312 
12314  VmaAllocator allocator,
12315  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
12316 {
12317  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
12318  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
12319 }
12320 
12322  VmaAllocator allocator,
12323  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
12324 {
12325  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
12326  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
12327 }
12328 
12330  VmaAllocator allocator,
12331  uint32_t memoryTypeIndex,
12332  VkMemoryPropertyFlags* pFlags)
12333 {
12334  VMA_ASSERT(allocator && pFlags);
12335  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
12336  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
12337 }
12338 
12340  VmaAllocator allocator,
12341  uint32_t frameIndex)
12342 {
12343  VMA_ASSERT(allocator);
12344  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
12345 
12346  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12347 
12348  allocator->SetCurrentFrameIndex(frameIndex);
12349 }
12350 
12351 void vmaCalculateStats(
12352  VmaAllocator allocator,
12353  VmaStats* pStats)
12354 {
12355  VMA_ASSERT(allocator && pStats);
12356  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12357  allocator->CalculateStats(pStats);
12358 }
12359 
12360 #if VMA_STATS_STRING_ENABLED
12361 
12362 void vmaBuildStatsString(
12363  VmaAllocator allocator,
12364  char** ppStatsString,
12365  VkBool32 detailedMap)
12366 {
12367  VMA_ASSERT(allocator && ppStatsString);
12368  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12369 
12370  VmaStringBuilder sb(allocator);
12371  {
12372  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
12373  json.BeginObject();
12374 
12375  VmaStats stats;
12376  allocator->CalculateStats(&stats);
12377 
12378  json.WriteString("Total");
12379  VmaPrintStatInfo(json, stats.total);
12380 
12381  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
12382  {
12383  json.BeginString("Heap ");
12384  json.ContinueString(heapIndex);
12385  json.EndString();
12386  json.BeginObject();
12387 
12388  json.WriteString("Size");
12389  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
12390 
12391  json.WriteString("Flags");
12392  json.BeginArray(true);
12393  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
12394  {
12395  json.WriteString("DEVICE_LOCAL");
12396  }
12397  json.EndArray();
12398 
12399  if(stats.memoryHeap[heapIndex].blockCount > 0)
12400  {
12401  json.WriteString("Stats");
12402  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
12403  }
12404 
12405  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
12406  {
12407  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
12408  {
12409  json.BeginString("Type ");
12410  json.ContinueString(typeIndex);
12411  json.EndString();
12412 
12413  json.BeginObject();
12414 
12415  json.WriteString("Flags");
12416  json.BeginArray(true);
12417  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
12418  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
12419  {
12420  json.WriteString("DEVICE_LOCAL");
12421  }
12422  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12423  {
12424  json.WriteString("HOST_VISIBLE");
12425  }
12426  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
12427  {
12428  json.WriteString("HOST_COHERENT");
12429  }
12430  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
12431  {
12432  json.WriteString("HOST_CACHED");
12433  }
12434  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
12435  {
12436  json.WriteString("LAZILY_ALLOCATED");
12437  }
12438  json.EndArray();
12439 
12440  if(stats.memoryType[typeIndex].blockCount > 0)
12441  {
12442  json.WriteString("Stats");
12443  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
12444  }
12445 
12446  json.EndObject();
12447  }
12448  }
12449 
12450  json.EndObject();
12451  }
12452  if(detailedMap == VK_TRUE)
12453  {
12454  allocator->PrintDetailedMap(json);
12455  }
12456 
12457  json.EndObject();
12458  }
12459 
12460  const size_t len = sb.GetLength();
12461  char* const pChars = vma_new_array(allocator, char, len + 1);
12462  if(len > 0)
12463  {
12464  memcpy(pChars, sb.GetData(), len);
12465  }
12466  pChars[len] = '\0';
12467  *ppStatsString = pChars;
12468 }
12469 
12470 void vmaFreeStatsString(
12471  VmaAllocator allocator,
12472  char* pStatsString)
12473 {
12474  if(pStatsString != VMA_NULL)
12475  {
12476  VMA_ASSERT(allocator);
12477  size_t len = strlen(pStatsString);
12478  vma_delete_array(allocator, pStatsString, len + 1);
12479  }
12480 }
12481 
12482 #endif // #if VMA_STATS_STRING_ENABLED
12483 
12484 /*
12485 This function is not protected by any mutex because it just reads immutable data.
12486 */
12487 VkResult vmaFindMemoryTypeIndex(
12488  VmaAllocator allocator,
12489  uint32_t memoryTypeBits,
12490  const VmaAllocationCreateInfo* pAllocationCreateInfo,
12491  uint32_t* pMemoryTypeIndex)
12492 {
12493  VMA_ASSERT(allocator != VK_NULL_HANDLE);
12494  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
12495  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
12496 
12497  if(pAllocationCreateInfo->memoryTypeBits != 0)
12498  {
12499  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
12500  }
12501 
12502  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
12503  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
12504 
12505  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12506  if(mapped)
12507  {
12508  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
12509  }
12510 
12511  // Convert usage to requiredFlags and preferredFlags.
12512  switch(pAllocationCreateInfo->usage)
12513  {
12515  break;
12517  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12518  {
12519  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
12520  }
12521  break;
12523  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12524  break;
12526  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
12527  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12528  {
12529  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
12530  }
12531  break;
12533  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
12534  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
12535  break;
12536  default:
12537  break;
12538  }
12539 
12540  *pMemoryTypeIndex = UINT32_MAX;
12541  uint32_t minCost = UINT32_MAX;
12542  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
12543  memTypeIndex < allocator->GetMemoryTypeCount();
12544  ++memTypeIndex, memTypeBit <<= 1)
12545  {
12546  // This memory type is acceptable according to memoryTypeBits bitmask.
12547  if((memTypeBit & memoryTypeBits) != 0)
12548  {
12549  const VkMemoryPropertyFlags currFlags =
12550  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
12551  // This memory type contains requiredFlags.
12552  if((requiredFlags & ~currFlags) == 0)
12553  {
12554  // Calculate cost as number of bits from preferredFlags not present in this memory type.
12555  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
12556  // Remember memory type with lowest cost.
12557  if(currCost < minCost)
12558  {
12559  *pMemoryTypeIndex = memTypeIndex;
12560  if(currCost == 0)
12561  {
12562  return VK_SUCCESS;
12563  }
12564  minCost = currCost;
12565  }
12566  }
12567  }
12568  }
12569  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
12570 }
12571 
12573  VmaAllocator allocator,
12574  const VkBufferCreateInfo* pBufferCreateInfo,
12575  const VmaAllocationCreateInfo* pAllocationCreateInfo,
12576  uint32_t* pMemoryTypeIndex)
12577 {
12578  VMA_ASSERT(allocator != VK_NULL_HANDLE);
12579  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
12580  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
12581  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
12582 
12583  const VkDevice hDev = allocator->m_hDevice;
12584  VkBuffer hBuffer = VK_NULL_HANDLE;
12585  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
12586  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
12587  if(res == VK_SUCCESS)
12588  {
12589  VkMemoryRequirements memReq = {};
12590  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
12591  hDev, hBuffer, &memReq);
12592 
12593  res = vmaFindMemoryTypeIndex(
12594  allocator,
12595  memReq.memoryTypeBits,
12596  pAllocationCreateInfo,
12597  pMemoryTypeIndex);
12598 
12599  allocator->GetVulkanFunctions().vkDestroyBuffer(
12600  hDev, hBuffer, allocator->GetAllocationCallbacks());
12601  }
12602  return res;
12603 }
12604 
12606  VmaAllocator allocator,
12607  const VkImageCreateInfo* pImageCreateInfo,
12608  const VmaAllocationCreateInfo* pAllocationCreateInfo,
12609  uint32_t* pMemoryTypeIndex)
12610 {
12611  VMA_ASSERT(allocator != VK_NULL_HANDLE);
12612  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
12613  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
12614  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
12615 
12616  const VkDevice hDev = allocator->m_hDevice;
12617  VkImage hImage = VK_NULL_HANDLE;
12618  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
12619  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
12620  if(res == VK_SUCCESS)
12621  {
12622  VkMemoryRequirements memReq = {};
12623  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
12624  hDev, hImage, &memReq);
12625 
12626  res = vmaFindMemoryTypeIndex(
12627  allocator,
12628  memReq.memoryTypeBits,
12629  pAllocationCreateInfo,
12630  pMemoryTypeIndex);
12631 
12632  allocator->GetVulkanFunctions().vkDestroyImage(
12633  hDev, hImage, allocator->GetAllocationCallbacks());
12634  }
12635  return res;
12636 }
12637 
12638 VkResult vmaCreatePool(
12639  VmaAllocator allocator,
12640  const VmaPoolCreateInfo* pCreateInfo,
12641  VmaPool* pPool)
12642 {
12643  VMA_ASSERT(allocator && pCreateInfo && pPool);
12644 
12645  VMA_DEBUG_LOG("vmaCreatePool");
12646 
12647  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12648 
12649  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
12650 
12651 #if VMA_RECORDING_ENABLED
12652  if(allocator->GetRecorder() != VMA_NULL)
12653  {
12654  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
12655  }
12656 #endif
12657 
12658  return res;
12659 }
12660 
12661 void vmaDestroyPool(
12662  VmaAllocator allocator,
12663  VmaPool pool)
12664 {
12665  VMA_ASSERT(allocator);
12666 
12667  if(pool == VK_NULL_HANDLE)
12668  {
12669  return;
12670  }
12671 
12672  VMA_DEBUG_LOG("vmaDestroyPool");
12673 
12674  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12675 
12676 #if VMA_RECORDING_ENABLED
12677  if(allocator->GetRecorder() != VMA_NULL)
12678  {
12679  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
12680  }
12681 #endif
12682 
12683  allocator->DestroyPool(pool);
12684 }
12685 
12686 void vmaGetPoolStats(
12687  VmaAllocator allocator,
12688  VmaPool pool,
12689  VmaPoolStats* pPoolStats)
12690 {
12691  VMA_ASSERT(allocator && pool && pPoolStats);
12692 
12693  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12694 
12695  allocator->GetPoolStats(pool, pPoolStats);
12696 }
12697 
12699  VmaAllocator allocator,
12700  VmaPool pool,
12701  size_t* pLostAllocationCount)
12702 {
12703  VMA_ASSERT(allocator && pool);
12704 
12705  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12706 
12707 #if VMA_RECORDING_ENABLED
12708  if(allocator->GetRecorder() != VMA_NULL)
12709  {
12710  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
12711  }
12712 #endif
12713 
12714  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
12715 }
12716 
12717 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
12718 {
12719  VMA_ASSERT(allocator && pool);
12720 
12721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12722 
12723  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
12724 
12725  return allocator->CheckPoolCorruption(pool);
12726 }
12727 
12728 VkResult vmaAllocateMemory(
12729  VmaAllocator allocator,
12730  const VkMemoryRequirements* pVkMemoryRequirements,
12731  const VmaAllocationCreateInfo* pCreateInfo,
12732  VmaAllocation* pAllocation,
12733  VmaAllocationInfo* pAllocationInfo)
12734 {
12735  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
12736 
12737  VMA_DEBUG_LOG("vmaAllocateMemory");
12738 
12739  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12740 
12741  VkResult result = allocator->AllocateMemory(
12742  *pVkMemoryRequirements,
12743  false, // requiresDedicatedAllocation
12744  false, // prefersDedicatedAllocation
12745  VK_NULL_HANDLE, // dedicatedBuffer
12746  VK_NULL_HANDLE, // dedicatedImage
12747  *pCreateInfo,
12748  VMA_SUBALLOCATION_TYPE_UNKNOWN,
12749  pAllocation);
12750 
12751 #if VMA_RECORDING_ENABLED
12752  if(allocator->GetRecorder() != VMA_NULL)
12753  {
12754  allocator->GetRecorder()->RecordAllocateMemory(
12755  allocator->GetCurrentFrameIndex(),
12756  *pVkMemoryRequirements,
12757  *pCreateInfo,
12758  *pAllocation);
12759  }
12760 #endif
12761 
12762  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
12763  {
12764  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
12765  }
12766 
12767  return result;
12768 }
12769 
12771  VmaAllocator allocator,
12772  VkBuffer buffer,
12773  const VmaAllocationCreateInfo* pCreateInfo,
12774  VmaAllocation* pAllocation,
12775  VmaAllocationInfo* pAllocationInfo)
12776 {
12777  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
12778 
12779  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
12780 
12781  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12782 
12783  VkMemoryRequirements vkMemReq = {};
12784  bool requiresDedicatedAllocation = false;
12785  bool prefersDedicatedAllocation = false;
12786  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
12787  requiresDedicatedAllocation,
12788  prefersDedicatedAllocation);
12789 
12790  VkResult result = allocator->AllocateMemory(
12791  vkMemReq,
12792  requiresDedicatedAllocation,
12793  prefersDedicatedAllocation,
12794  buffer, // dedicatedBuffer
12795  VK_NULL_HANDLE, // dedicatedImage
12796  *pCreateInfo,
12797  VMA_SUBALLOCATION_TYPE_BUFFER,
12798  pAllocation);
12799 
12800 #if VMA_RECORDING_ENABLED
12801  if(allocator->GetRecorder() != VMA_NULL)
12802  {
12803  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
12804  allocator->GetCurrentFrameIndex(),
12805  vkMemReq,
12806  requiresDedicatedAllocation,
12807  prefersDedicatedAllocation,
12808  *pCreateInfo,
12809  *pAllocation);
12810  }
12811 #endif
12812 
12813  if(pAllocationInfo && result == VK_SUCCESS)
12814  {
12815  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
12816  }
12817 
12818  return result;
12819 }
12820 
12821 VkResult vmaAllocateMemoryForImage(
12822  VmaAllocator allocator,
12823  VkImage image,
12824  const VmaAllocationCreateInfo* pCreateInfo,
12825  VmaAllocation* pAllocation,
12826  VmaAllocationInfo* pAllocationInfo)
12827 {
12828  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
12829 
12830  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
12831 
12832  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12833 
12834  VkMemoryRequirements vkMemReq = {};
12835  bool requiresDedicatedAllocation = false;
12836  bool prefersDedicatedAllocation = false;
12837  allocator->GetImageMemoryRequirements(image, vkMemReq,
12838  requiresDedicatedAllocation, prefersDedicatedAllocation);
12839 
12840  VkResult result = allocator->AllocateMemory(
12841  vkMemReq,
12842  requiresDedicatedAllocation,
12843  prefersDedicatedAllocation,
12844  VK_NULL_HANDLE, // dedicatedBuffer
12845  image, // dedicatedImage
12846  *pCreateInfo,
12847  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
12848  pAllocation);
12849 
12850 #if VMA_RECORDING_ENABLED
12851  if(allocator->GetRecorder() != VMA_NULL)
12852  {
12853  allocator->GetRecorder()->RecordAllocateMemoryForImage(
12854  allocator->GetCurrentFrameIndex(),
12855  vkMemReq,
12856  requiresDedicatedAllocation,
12857  prefersDedicatedAllocation,
12858  *pCreateInfo,
12859  *pAllocation);
12860  }
12861 #endif
12862 
12863  if(pAllocationInfo && result == VK_SUCCESS)
12864  {
12865  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
12866  }
12867 
12868  return result;
12869 }
12870 
12871 void vmaFreeMemory(
12872  VmaAllocator allocator,
12873  VmaAllocation allocation)
12874 {
12875  VMA_ASSERT(allocator);
12876 
12877  if(allocation == VK_NULL_HANDLE)
12878  {
12879  return;
12880  }
12881 
12882  VMA_DEBUG_LOG("vmaFreeMemory");
12883 
12884  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12885 
12886 #if VMA_RECORDING_ENABLED
12887  if(allocator->GetRecorder() != VMA_NULL)
12888  {
12889  allocator->GetRecorder()->RecordFreeMemory(
12890  allocator->GetCurrentFrameIndex(),
12891  allocation);
12892  }
12893 #endif
12894 
12895  allocator->FreeMemory(allocation);
12896 }
12897 
12899  VmaAllocator allocator,
12900  VmaAllocation allocation,
12901  VmaAllocationInfo* pAllocationInfo)
12902 {
12903  VMA_ASSERT(allocator && allocation && pAllocationInfo);
12904 
12905  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12906 
12907 #if VMA_RECORDING_ENABLED
12908  if(allocator->GetRecorder() != VMA_NULL)
12909  {
12910  allocator->GetRecorder()->RecordGetAllocationInfo(
12911  allocator->GetCurrentFrameIndex(),
12912  allocation);
12913  }
12914 #endif
12915 
12916  allocator->GetAllocationInfo(allocation, pAllocationInfo);
12917 }
12918 
12919 VkBool32 vmaTouchAllocation(
12920  VmaAllocator allocator,
12921  VmaAllocation allocation)
12922 {
12923  VMA_ASSERT(allocator && allocation);
12924 
12925  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12926 
12927 #if VMA_RECORDING_ENABLED
12928  if(allocator->GetRecorder() != VMA_NULL)
12929  {
12930  allocator->GetRecorder()->RecordTouchAllocation(
12931  allocator->GetCurrentFrameIndex(),
12932  allocation);
12933  }
12934 #endif
12935 
12936  return allocator->TouchAllocation(allocation);
12937 }
12938 
12940  VmaAllocator allocator,
12941  VmaAllocation allocation,
12942  void* pUserData)
12943 {
12944  VMA_ASSERT(allocator && allocation);
12945 
12946  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12947 
12948  allocation->SetUserData(allocator, pUserData);
12949 
12950 #if VMA_RECORDING_ENABLED
12951  if(allocator->GetRecorder() != VMA_NULL)
12952  {
12953  allocator->GetRecorder()->RecordSetAllocationUserData(
12954  allocator->GetCurrentFrameIndex(),
12955  allocation,
12956  pUserData);
12957  }
12958 #endif
12959 }
12960 
12962  VmaAllocator allocator,
12963  VmaAllocation* pAllocation)
12964 {
12965  VMA_ASSERT(allocator && pAllocation);
12966 
12967  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
12968 
12969  allocator->CreateLostAllocation(pAllocation);
12970 
12971 #if VMA_RECORDING_ENABLED
12972  if(allocator->GetRecorder() != VMA_NULL)
12973  {
12974  allocator->GetRecorder()->RecordCreateLostAllocation(
12975  allocator->GetCurrentFrameIndex(),
12976  *pAllocation);
12977  }
12978 #endif
12979 }
12980 
12981 VkResult vmaMapMemory(
12982  VmaAllocator allocator,
12983  VmaAllocation allocation,
12984  void** ppData)
12985 {
12986  VMA_ASSERT(allocator && allocation && ppData);
12987 
12988  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12989 
12990  VkResult res = allocator->Map(allocation, ppData);
12991 
12992 #if VMA_RECORDING_ENABLED
12993  if(allocator->GetRecorder() != VMA_NULL)
12994  {
12995  allocator->GetRecorder()->RecordMapMemory(
12996  allocator->GetCurrentFrameIndex(),
12997  allocation);
12998  }
12999 #endif
13000 
13001  return res;
13002 }
13003 
13004 void vmaUnmapMemory(
13005  VmaAllocator allocator,
13006  VmaAllocation allocation)
13007 {
13008  VMA_ASSERT(allocator && allocation);
13009 
13010  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13011 
13012 #if VMA_RECORDING_ENABLED
13013  if(allocator->GetRecorder() != VMA_NULL)
13014  {
13015  allocator->GetRecorder()->RecordUnmapMemory(
13016  allocator->GetCurrentFrameIndex(),
13017  allocation);
13018  }
13019 #endif
13020 
13021  allocator->Unmap(allocation);
13022 }
13023 
13024 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13025 {
13026  VMA_ASSERT(allocator && allocation);
13027 
13028  VMA_DEBUG_LOG("vmaFlushAllocation");
13029 
13030  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13031 
13032  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
13033 
13034 #if VMA_RECORDING_ENABLED
13035  if(allocator->GetRecorder() != VMA_NULL)
13036  {
13037  allocator->GetRecorder()->RecordFlushAllocation(
13038  allocator->GetCurrentFrameIndex(),
13039  allocation, offset, size);
13040  }
13041 #endif
13042 }
13043 
13044 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13045 {
13046  VMA_ASSERT(allocator && allocation);
13047 
13048  VMA_DEBUG_LOG("vmaInvalidateAllocation");
13049 
13050  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13051 
13052  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
13053 
13054 #if VMA_RECORDING_ENABLED
13055  if(allocator->GetRecorder() != VMA_NULL)
13056  {
13057  allocator->GetRecorder()->RecordInvalidateAllocation(
13058  allocator->GetCurrentFrameIndex(),
13059  allocation, offset, size);
13060  }
13061 #endif
13062 }
13063 
13064 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
13065 {
13066  VMA_ASSERT(allocator);
13067 
13068  VMA_DEBUG_LOG("vmaCheckCorruption");
13069 
13070  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13071 
13072  return allocator->CheckCorruption(memoryTypeBits);
13073 }
13074 
13075 VkResult vmaDefragment(
13076  VmaAllocator allocator,
13077  VmaAllocation* pAllocations,
13078  size_t allocationCount,
13079  VkBool32* pAllocationsChanged,
13080  const VmaDefragmentationInfo *pDefragmentationInfo,
13081  VmaDefragmentationStats* pDefragmentationStats)
13082 {
13083  VMA_ASSERT(allocator && pAllocations);
13084 
13085  VMA_DEBUG_LOG("vmaDefragment");
13086 
13087  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13088 
13089  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
13090 }
13091 
13092 VkResult vmaBindBufferMemory(
13093  VmaAllocator allocator,
13094  VmaAllocation allocation,
13095  VkBuffer buffer)
13096 {
13097  VMA_ASSERT(allocator && allocation && buffer);
13098 
13099  VMA_DEBUG_LOG("vmaBindBufferMemory");
13100 
13101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13102 
13103  return allocator->BindBufferMemory(allocation, buffer);
13104 }
13105 
13106 VkResult vmaBindImageMemory(
13107  VmaAllocator allocator,
13108  VmaAllocation allocation,
13109  VkImage image)
13110 {
13111  VMA_ASSERT(allocator && allocation && image);
13112 
13113  VMA_DEBUG_LOG("vmaBindImageMemory");
13114 
13115  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13116 
13117  return allocator->BindImageMemory(allocation, image);
13118 }
13119 
13120 VkResult vmaCreateBuffer(
13121  VmaAllocator allocator,
13122  const VkBufferCreateInfo* pBufferCreateInfo,
13123  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13124  VkBuffer* pBuffer,
13125  VmaAllocation* pAllocation,
13126  VmaAllocationInfo* pAllocationInfo)
13127 {
13128  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
13129 
13130  VMA_DEBUG_LOG("vmaCreateBuffer");
13131 
13132  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13133 
13134  *pBuffer = VK_NULL_HANDLE;
13135  *pAllocation = VK_NULL_HANDLE;
13136 
13137  // 1. Create VkBuffer.
13138  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
13139  allocator->m_hDevice,
13140  pBufferCreateInfo,
13141  allocator->GetAllocationCallbacks(),
13142  pBuffer);
13143  if(res >= 0)
13144  {
13145  // 2. vkGetBufferMemoryRequirements.
13146  VkMemoryRequirements vkMemReq = {};
13147  bool requiresDedicatedAllocation = false;
13148  bool prefersDedicatedAllocation = false;
13149  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
13150  requiresDedicatedAllocation, prefersDedicatedAllocation);
13151 
13152  // Make sure alignment requirements for specific buffer usages reported
13153  // in Physical Device Properties are included in alignment reported by memory requirements.
13154  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
13155  {
13156  VMA_ASSERT(vkMemReq.alignment %
13157  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
13158  }
13159  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
13160  {
13161  VMA_ASSERT(vkMemReq.alignment %
13162  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
13163  }
13164  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
13165  {
13166  VMA_ASSERT(vkMemReq.alignment %
13167  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
13168  }
13169 
13170  // 3. Allocate memory using allocator.
13171  res = allocator->AllocateMemory(
13172  vkMemReq,
13173  requiresDedicatedAllocation,
13174  prefersDedicatedAllocation,
13175  *pBuffer, // dedicatedBuffer
13176  VK_NULL_HANDLE, // dedicatedImage
13177  *pAllocationCreateInfo,
13178  VMA_SUBALLOCATION_TYPE_BUFFER,
13179  pAllocation);
13180 
13181 #if VMA_RECORDING_ENABLED
13182  if(allocator->GetRecorder() != VMA_NULL)
13183  {
13184  allocator->GetRecorder()->RecordCreateBuffer(
13185  allocator->GetCurrentFrameIndex(),
13186  *pBufferCreateInfo,
13187  *pAllocationCreateInfo,
13188  *pAllocation);
13189  }
13190 #endif
13191 
13192  if(res >= 0)
13193  {
13194  // 3. Bind buffer with memory.
13195  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
13196  if(res >= 0)
13197  {
13198  // All steps succeeded.
13199  #if VMA_STATS_STRING_ENABLED
13200  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
13201  #endif
13202  if(pAllocationInfo != VMA_NULL)
13203  {
13204  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13205  }
13206 
13207  return VK_SUCCESS;
13208  }
13209  allocator->FreeMemory(*pAllocation);
13210  *pAllocation = VK_NULL_HANDLE;
13211  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
13212  *pBuffer = VK_NULL_HANDLE;
13213  return res;
13214  }
13215  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
13216  *pBuffer = VK_NULL_HANDLE;
13217  return res;
13218  }
13219  return res;
13220 }
13221 
13222 void vmaDestroyBuffer(
13223  VmaAllocator allocator,
13224  VkBuffer buffer,
13225  VmaAllocation allocation)
13226 {
13227  VMA_ASSERT(allocator);
13228 
13229  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
13230  {
13231  return;
13232  }
13233 
13234  VMA_DEBUG_LOG("vmaDestroyBuffer");
13235 
13236  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13237 
13238 #if VMA_RECORDING_ENABLED
13239  if(allocator->GetRecorder() != VMA_NULL)
13240  {
13241  allocator->GetRecorder()->RecordDestroyBuffer(
13242  allocator->GetCurrentFrameIndex(),
13243  allocation);
13244  }
13245 #endif
13246 
13247  if(buffer != VK_NULL_HANDLE)
13248  {
13249  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
13250  }
13251 
13252  if(allocation != VK_NULL_HANDLE)
13253  {
13254  allocator->FreeMemory(allocation);
13255  }
13256 }
13257 
13258 VkResult vmaCreateImage(
13259  VmaAllocator allocator,
13260  const VkImageCreateInfo* pImageCreateInfo,
13261  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13262  VkImage* pImage,
13263  VmaAllocation* pAllocation,
13264  VmaAllocationInfo* pAllocationInfo)
13265 {
13266  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
13267 
13268  VMA_DEBUG_LOG("vmaCreateImage");
13269 
13270  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13271 
13272  *pImage = VK_NULL_HANDLE;
13273  *pAllocation = VK_NULL_HANDLE;
13274 
13275  // 1. Create VkImage.
13276  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
13277  allocator->m_hDevice,
13278  pImageCreateInfo,
13279  allocator->GetAllocationCallbacks(),
13280  pImage);
13281  if(res >= 0)
13282  {
13283  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
13284  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
13285  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
13286 
13287  // 2. Allocate memory using allocator.
13288  VkMemoryRequirements vkMemReq = {};
13289  bool requiresDedicatedAllocation = false;
13290  bool prefersDedicatedAllocation = false;
13291  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
13292  requiresDedicatedAllocation, prefersDedicatedAllocation);
13293 
13294  res = allocator->AllocateMemory(
13295  vkMemReq,
13296  requiresDedicatedAllocation,
13297  prefersDedicatedAllocation,
13298  VK_NULL_HANDLE, // dedicatedBuffer
13299  *pImage, // dedicatedImage
13300  *pAllocationCreateInfo,
13301  suballocType,
13302  pAllocation);
13303 
13304 #if VMA_RECORDING_ENABLED
13305  if(allocator->GetRecorder() != VMA_NULL)
13306  {
13307  allocator->GetRecorder()->RecordCreateImage(
13308  allocator->GetCurrentFrameIndex(),
13309  *pImageCreateInfo,
13310  *pAllocationCreateInfo,
13311  *pAllocation);
13312  }
13313 #endif
13314 
13315  if(res >= 0)
13316  {
13317  // 3. Bind image with memory.
13318  res = allocator->BindImageMemory(*pAllocation, *pImage);
13319  if(res >= 0)
13320  {
13321  // All steps succeeded.
13322  #if VMA_STATS_STRING_ENABLED
13323  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
13324  #endif
13325  if(pAllocationInfo != VMA_NULL)
13326  {
13327  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13328  }
13329 
13330  return VK_SUCCESS;
13331  }
13332  allocator->FreeMemory(*pAllocation);
13333  *pAllocation = VK_NULL_HANDLE;
13334  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
13335  *pImage = VK_NULL_HANDLE;
13336  return res;
13337  }
13338  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
13339  *pImage = VK_NULL_HANDLE;
13340  return res;
13341  }
13342  return res;
13343 }
13344 
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    // Passing null for both handles is a valid no-op.
    if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record the call before destroying, while the allocation handle is
    // still valid.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyImage(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    // Destroy the image first, then release its backing memory.
    if(image != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    }
    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(allocation);
    }
}
13379 
13380 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1429
+Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1342 #include <vulkan/vulkan.h>
1343 
1344 #if !defined(VMA_DEDICATED_ALLOCATION)
1345  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1346  #define VMA_DEDICATED_ALLOCATION 1
1347  #else
1348  #define VMA_DEDICATED_ALLOCATION 0
1349  #endif
1350 #endif
1351 
1361 VK_DEFINE_HANDLE(VmaAllocator)
1362 
1363 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1365  VmaAllocator allocator,
1366  uint32_t memoryType,
1367  VkDeviceMemory memory,
1368  VkDeviceSize size);
1370 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1371  VmaAllocator allocator,
1372  uint32_t memoryType,
1373  VkDeviceMemory memory,
1374  VkDeviceSize size);
1375 
1389 
1419 
1422 typedef VkFlags VmaAllocatorCreateFlags;
1423 
1428 typedef struct VmaVulkanFunctions {
1429  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1430  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1431  PFN_vkAllocateMemory vkAllocateMemory;
1432  PFN_vkFreeMemory vkFreeMemory;
1433  PFN_vkMapMemory vkMapMemory;
1434  PFN_vkUnmapMemory vkUnmapMemory;
1435  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1436  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1437  PFN_vkBindBufferMemory vkBindBufferMemory;
1438  PFN_vkBindImageMemory vkBindImageMemory;
1439  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1440  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1441  PFN_vkCreateBuffer vkCreateBuffer;
1442  PFN_vkDestroyBuffer vkDestroyBuffer;
1443  PFN_vkCreateImage vkCreateImage;
1444  PFN_vkDestroyImage vkDestroyImage;
1445 #if VMA_DEDICATED_ALLOCATION
1446  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1447  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1448 #endif
1450 
1452 typedef enum VmaRecordFlagBits {
1459 
1462 typedef VkFlags VmaRecordFlags;
1463 
1464 /*
1465 Define this macro to 0/1 to disable/enable support for recording functionality,
1466 available through VmaAllocatorCreateInfo::pRecordSettings.
1467 */
1468 #ifndef VMA_RECORDING_ENABLED
1469  #ifdef _WIN32
1470  #define VMA_RECORDING_ENABLED 1
1471  #else
1472  #define VMA_RECORDING_ENABLED 0
1473  #endif
1474 #endif
1475 
1477 typedef struct VmaRecordSettings
1478 {
1480  VmaRecordFlags flags;
1488  const char* pFilePath;
1490 
1493 {
1495  VmaAllocatorCreateFlags flags;
1497 
1498  VkPhysicalDevice physicalDevice;
1500 
1501  VkDevice device;
1503 
1506 
1507  const VkAllocationCallbacks* pAllocationCallbacks;
1509 
1548  const VkDeviceSize* pHeapSizeLimit;
1569 
1571 VkResult vmaCreateAllocator(
1572  const VmaAllocatorCreateInfo* pCreateInfo,
1573  VmaAllocator* pAllocator);
1574 
1576 void vmaDestroyAllocator(
1577  VmaAllocator allocator);
1578 
1584  VmaAllocator allocator,
1585  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1586 
1592  VmaAllocator allocator,
1593  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1594 
1602  VmaAllocator allocator,
1603  uint32_t memoryTypeIndex,
1604  VkMemoryPropertyFlags* pFlags);
1605 
1615  VmaAllocator allocator,
1616  uint32_t frameIndex);
1617 
1620 typedef struct VmaStatInfo
1621 {
1623  uint32_t blockCount;
1629  VkDeviceSize usedBytes;
1631  VkDeviceSize unusedBytes;
1632  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1633  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1634 } VmaStatInfo;
1635 
1637 typedef struct VmaStats
1638 {
1639  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1640  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1642 } VmaStats;
1643 
1645 void vmaCalculateStats(
1646  VmaAllocator allocator,
1647  VmaStats* pStats);
1648 
1649 #define VMA_STATS_STRING_ENABLED 1
1650 
1651 #if VMA_STATS_STRING_ENABLED
1652 
1654 
1656 void vmaBuildStatsString(
1657  VmaAllocator allocator,
1658  char** ppStatsString,
1659  VkBool32 detailedMap);
1660 
1661 void vmaFreeStatsString(
1662  VmaAllocator allocator,
1663  char* pStatsString);
1664 
1665 #endif // #if VMA_STATS_STRING_ENABLED
1666 
1675 VK_DEFINE_HANDLE(VmaPool)
1676 
1677 typedef enum VmaMemoryUsage
1678 {
1727 } VmaMemoryUsage;
1728 
1743 
1798 
1802 
1804 {
1806  VmaAllocationCreateFlags flags;
1817  VkMemoryPropertyFlags requiredFlags;
1822  VkMemoryPropertyFlags preferredFlags;
1830  uint32_t memoryTypeBits;
1843  void* pUserData;
1845 
1862 VkResult vmaFindMemoryTypeIndex(
1863  VmaAllocator allocator,
1864  uint32_t memoryTypeBits,
1865  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1866  uint32_t* pMemoryTypeIndex);
1867 
1881  VmaAllocator allocator,
1882  const VkBufferCreateInfo* pBufferCreateInfo,
1883  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1884  uint32_t* pMemoryTypeIndex);
1885 
1899  VmaAllocator allocator,
1900  const VkImageCreateInfo* pImageCreateInfo,
1901  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1902  uint32_t* pMemoryTypeIndex);
1903 
1924 
1939 
1942 typedef VkFlags VmaPoolCreateFlags;
1943 
1946 typedef struct VmaPoolCreateInfo {
1952  VmaPoolCreateFlags flags;
1957  VkDeviceSize blockSize;
1987 
1990 typedef struct VmaPoolStats {
1993  VkDeviceSize size;
1996  VkDeviceSize unusedSize;
2009  VkDeviceSize unusedRangeSizeMax;
2010 } VmaPoolStats;
2011 
2018 VkResult vmaCreatePool(
2019  VmaAllocator allocator,
2020  const VmaPoolCreateInfo* pCreateInfo,
2021  VmaPool* pPool);
2022 
2025 void vmaDestroyPool(
2026  VmaAllocator allocator,
2027  VmaPool pool);
2028 
2035 void vmaGetPoolStats(
2036  VmaAllocator allocator,
2037  VmaPool pool,
2038  VmaPoolStats* pPoolStats);
2039 
2047  VmaAllocator allocator,
2048  VmaPool pool,
2049  size_t* pLostAllocationCount);
2050 
2065 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2066 
2091 VK_DEFINE_HANDLE(VmaAllocation)
2092 
2093 
2095 typedef struct VmaAllocationInfo {
2100  uint32_t memoryType;
2109  VkDeviceMemory deviceMemory;
2114  VkDeviceSize offset;
2119  VkDeviceSize size;
2133  void* pUserData;
2135 
2146 VkResult vmaAllocateMemory(
2147  VmaAllocator allocator,
2148  const VkMemoryRequirements* pVkMemoryRequirements,
2149  const VmaAllocationCreateInfo* pCreateInfo,
2150  VmaAllocation* pAllocation,
2151  VmaAllocationInfo* pAllocationInfo);
2152 
2160  VmaAllocator allocator,
2161  VkBuffer buffer,
2162  const VmaAllocationCreateInfo* pCreateInfo,
2163  VmaAllocation* pAllocation,
2164  VmaAllocationInfo* pAllocationInfo);
2165 
2167 VkResult vmaAllocateMemoryForImage(
2168  VmaAllocator allocator,
2169  VkImage image,
2170  const VmaAllocationCreateInfo* pCreateInfo,
2171  VmaAllocation* pAllocation,
2172  VmaAllocationInfo* pAllocationInfo);
2173 
2175 void vmaFreeMemory(
2176  VmaAllocator allocator,
2177  VmaAllocation allocation);
2178 
2196  VmaAllocator allocator,
2197  VmaAllocation allocation,
2198  VmaAllocationInfo* pAllocationInfo);
2199 
2214 VkBool32 vmaTouchAllocation(
2215  VmaAllocator allocator,
2216  VmaAllocation allocation);
2217 
2232  VmaAllocator allocator,
2233  VmaAllocation allocation,
2234  void* pUserData);
2235 
2247  VmaAllocator allocator,
2248  VmaAllocation* pAllocation);
2249 
2284 VkResult vmaMapMemory(
2285  VmaAllocator allocator,
2286  VmaAllocation allocation,
2287  void** ppData);
2288 
2293 void vmaUnmapMemory(
2294  VmaAllocator allocator,
2295  VmaAllocation allocation);
2296 
2309 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2310 
2323 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2324 
2341 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2342 
2344 typedef struct VmaDefragmentationInfo {
2349  VkDeviceSize maxBytesToMove;
2356 
2358 typedef struct VmaDefragmentationStats {
2360  VkDeviceSize bytesMoved;
2362  VkDeviceSize bytesFreed;
2368 
2455 VkResult vmaDefragment(
2456  VmaAllocator allocator,
2457  VmaAllocation* pAllocations,
2458  size_t allocationCount,
2459  VkBool32* pAllocationsChanged,
2460  const VmaDefragmentationInfo *pDefragmentationInfo,
2461  VmaDefragmentationStats* pDefragmentationStats);
2462 
2475 VkResult vmaBindBufferMemory(
2476  VmaAllocator allocator,
2477  VmaAllocation allocation,
2478  VkBuffer buffer);
2479 
2492 VkResult vmaBindImageMemory(
2493  VmaAllocator allocator,
2494  VmaAllocation allocation,
2495  VkImage image);
2496 
2523 VkResult vmaCreateBuffer(
2524  VmaAllocator allocator,
2525  const VkBufferCreateInfo* pBufferCreateInfo,
2526  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2527  VkBuffer* pBuffer,
2528  VmaAllocation* pAllocation,
2529  VmaAllocationInfo* pAllocationInfo);
2530 
2542 void vmaDestroyBuffer(
2543  VmaAllocator allocator,
2544  VkBuffer buffer,
2545  VmaAllocation allocation);
2546 
2548 VkResult vmaCreateImage(
2549  VmaAllocator allocator,
2550  const VkImageCreateInfo* pImageCreateInfo,
2551  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2552  VkImage* pImage,
2553  VmaAllocation* pAllocation,
2554  VmaAllocationInfo* pAllocationInfo);
2555 
2567 void vmaDestroyImage(
2568  VmaAllocator allocator,
2569  VkImage image,
2570  VmaAllocation allocation);
2571 
2572 #ifdef __cplusplus
2573 }
2574 #endif
2575 
2576 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2577 
2578 // For Visual Studio IntelliSense.
2579 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2580 #define VMA_IMPLEMENTATION
2581 #endif
2582 
2583 #ifdef VMA_IMPLEMENTATION
2584 #undef VMA_IMPLEMENTATION
2585 
2586 #include <cstdint>
2587 #include <cstdlib>
2588 #include <cstring>
2589 
2590 /*******************************************************************************
2591 CONFIGURATION SECTION
2592 
2593 Define some of these macros before each #include of this header or change them
here if you need other than default behavior depending on your environment.
2595 */
2596 
2597 /*
2598 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2599 internally, like:
2600 
2601  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2602 
Define to 0 if you are going to provide your own pointers to Vulkan functions via
2604 VmaAllocatorCreateInfo::pVulkanFunctions.
2605 */
2606 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2607 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2608 #endif
2609 
2610 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2611 //#define VMA_USE_STL_CONTAINERS 1
2612 
2613 /* Set this macro to 1 to make the library including and using STL containers:
2614 std::pair, std::vector, std::list, std::unordered_map.
2615 
2616 Set it to 0 or undefined to make the library using its own implementation of
2617 the containers.
2618 */
2619 #if VMA_USE_STL_CONTAINERS
2620  #define VMA_USE_STL_VECTOR 1
2621  #define VMA_USE_STL_UNORDERED_MAP 1
2622  #define VMA_USE_STL_LIST 1
2623 #endif
2624 
2625 #if VMA_USE_STL_VECTOR
2626  #include <vector>
2627 #endif
2628 
2629 #if VMA_USE_STL_UNORDERED_MAP
2630  #include <unordered_map>
2631 #endif
2632 
2633 #if VMA_USE_STL_LIST
2634  #include <list>
2635 #endif
2636 
2637 /*
2638 Following headers are used in this CONFIGURATION section only, so feel free to
2639 remove them if not needed.
2640 */
2641 #include <cassert> // for assert
2642 #include <algorithm> // for min, max
2643 #include <mutex> // for std::mutex
2644 #include <atomic> // for std::atomic
2645 
2646 #ifndef VMA_NULL
2647  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2648  #define VMA_NULL nullptr
2649 #endif
2650 
#if defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
// Fallback for platforms whose C library lacks C11 aligned_alloc():
// implements it on top of posix_memalign(). Returns VMA_NULL on failure.
// NOTE(review): posix_memalign requires alignment to be a power of two and a
// multiple of sizeof(void*); power-of-two alignment is presumed here -- confirm
// at call sites.
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif
2667 
2668 // If your compiler is not compatible with C++11 and definition of
// aligned_alloc() function is missing, uncommenting the following line may help:
2670 
2671 //#include <malloc.h>
2672 
2673 // Normal assert to check for programmer's errors, especially in Debug configuration.
2674 #ifndef VMA_ASSERT
2675  #ifdef _DEBUG
2676  #define VMA_ASSERT(expr) assert(expr)
2677  #else
2678  #define VMA_ASSERT(expr)
2679  #endif
2680 #endif
2681 
2682 // Assert that will be called very often, like inside data structures e.g. operator[].
2683 // Making it non-empty can make program slow.
2684 #ifndef VMA_HEAVY_ASSERT
2685  #ifdef _DEBUG
2686  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2687  #else
2688  #define VMA_HEAVY_ASSERT(expr)
2689  #endif
2690 #endif
2691 
2692 #ifndef VMA_ALIGN_OF
2693  #define VMA_ALIGN_OF(type) (__alignof(type))
2694 #endif
2695 
2696 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2697  #if defined(_WIN32)
2698  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2699  #else
2700  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2701  #endif
2702 #endif
2703 
2704 #ifndef VMA_SYSTEM_FREE
2705  #if defined(_WIN32)
2706  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2707  #else
2708  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2709  #endif
2710 #endif
2711 
2712 #ifndef VMA_MIN
2713  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2714 #endif
2715 
2716 #ifndef VMA_MAX
2717  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2718 #endif
2719 
2720 #ifndef VMA_SWAP
2721  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2722 #endif
2723 
2724 #ifndef VMA_SORT
2725  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2726 #endif
2727 
2728 #ifndef VMA_DEBUG_LOG
2729  #define VMA_DEBUG_LOG(format, ...)
2730  /*
2731  #define VMA_DEBUG_LOG(format, ...) do { \
2732  printf(format, __VA_ARGS__); \
2733  printf("\n"); \
2734  } while(false)
2735  */
2736 #endif
2737 
2738 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2739 #if VMA_STATS_STRING_ENABLED
2740  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2741  {
2742  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2743  }
2744  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2745  {
2746  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2747  }
2748  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2749  {
2750  snprintf(outStr, strLen, "%p", ptr);
2751  }
2752 #endif
2753 
#ifndef VMA_MUTEX
    // Default mutex wrapper over std::mutex. Define VMA_MUTEX before
    // including this file to substitute a custom type exposing
    // Lock()/Unlock().
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif
2767 
2768 /*
2769 If providing your own implementation, you need to implement a subset of std::atomic:
2770 
2771 - Constructor(uint32_t desired)
2772 - uint32_t load() const
2773 - void store(uint32_t desired)
2774 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2775 */
2776 #ifndef VMA_ATOMIC_UINT32
2777  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2778 #endif
2779 
2780 #ifndef VMA_BEST_FIT
2781 
2793  #define VMA_BEST_FIT (1)
2794 #endif
2795 
2796 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2797 
2801  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2802 #endif
2803 
2804 #ifndef VMA_DEBUG_ALIGNMENT
2805 
2809  #define VMA_DEBUG_ALIGNMENT (1)
2810 #endif
2811 
2812 #ifndef VMA_DEBUG_MARGIN
2813 
2817  #define VMA_DEBUG_MARGIN (0)
2818 #endif
2819 
2820 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2821 
2825  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2826 #endif
2827 
2828 #ifndef VMA_DEBUG_DETECT_CORRUPTION
2829 
2834  #define VMA_DEBUG_DETECT_CORRUPTION (0)
2835 #endif
2836 
2837 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2838 
2842  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2843 #endif
2844 
2845 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2846 
2850  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2851 #endif
2852 
2853 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2854  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2856 #endif
2857 
2858 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2859  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2861 #endif
2862 
2863 #ifndef VMA_CLASS_NO_COPY
2864  #define VMA_CLASS_NO_COPY(className) \
2865  private: \
2866  className(const className&) = delete; \
2867  className& operator=(const className&) = delete;
2868 #endif
2869 
2870 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2871 
2872 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
2873 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
2874 
2875 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
2876 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
2877 
2878 /*******************************************************************************
2879 END OF CONFIGURATION
2880 */
2881 
2882 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2883  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2884 
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Kernighan's method: v &= v - 1 clears the lowest set bit, so the loop
    // iterates exactly once per set bit.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
}
2895 
// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T. Works for any non-zero align (not
// only powers of two) because it uses integer division rather than masking.
// align must not be 0.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// (Original comment mistakenly said "VmaAlignUp".)
// Use types like uint32_t, uint64_t as T. align must not be 0.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}
2910 
// Division with mathematical rounding to nearest number.
// Adds half the divisor before dividing; intended for unsigned integer T.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}
2917 
2918 static inline bool VmaStrIsEmpty(const char* pStr)
2919 {
2920  return pStr == VMA_NULL || *pStr == '\0';
2921 }
2922 
2923 #ifndef VMA_SORT
2924 
// Lomuto partition scheme: uses the last element of [beg, end) as the pivot,
// moves every element that compares less than the pivot to the front, then
// swaps the pivot into place between the two groups. Returns an iterator to
// the pivot's final (sorted) position. cmp(a, b) must return true when a
// orders before b.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // pivot = last element
    Iterator insertIndex = beg;                // next slot for a "less than pivot" element
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
2947 
// Recursive quicksort over [beg, end) built on VmaQuickSortPartition.
// Used as the default VMA_SORT when the user has not provided one.
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        // The pivot lands at its final sorted position; recurse on the
        // sub-ranges on each side of it.
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}
2958 
2959 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2960 
2961 #endif // #ifndef VMA_SORT
2962 
2963 /*
2964 Returns true if two memory blocks occupy overlapping pages.
2965 ResourceA must be in less memory offset than ResourceB.
2966 
2967 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2968 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2969 */
2970 static inline bool VmaBlocksOnSamePage(
2971  VkDeviceSize resourceAOffset,
2972  VkDeviceSize resourceASize,
2973  VkDeviceSize resourceBOffset,
2974  VkDeviceSize pageSize)
2975 {
2976  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2977  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2978  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2979  VkDeviceSize resourceBStart = resourceBOffset;
2980  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2981  return resourceAEndPage == resourceBStartPage;
2982 }
2983 
// Category of a suballocation. Used by VmaIsBufferImageGranularityConflict
// to decide which pairs of neighbors must respect bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0, // unused range
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1, // treated conservatively: conflicts with any non-free neighbor
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, // image with unknown tiling
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF // forces 32-bit underlying type
};
2994 
2995 /*
2996 Returns true if given suballocation types could conflict and must respect
2997 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
2998 or linear image and another one is optimal image. If type is unknown, behave
2999 conservatively.
3000 */
3001 static inline bool VmaIsBufferImageGranularityConflict(
3002  VmaSuballocationType suballocType1,
3003  VmaSuballocationType suballocType2)
3004 {
3005  if(suballocType1 > suballocType2)
3006  {
3007  VMA_SWAP(suballocType1, suballocType2);
3008  }
3009 
3010  switch(suballocType1)
3011  {
3012  case VMA_SUBALLOCATION_TYPE_FREE:
3013  return false;
3014  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3015  return true;
3016  case VMA_SUBALLOCATION_TYPE_BUFFER:
3017  return
3018  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3019  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3020  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3021  return
3022  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3023  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3024  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3025  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3026  return
3027  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3028  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3029  return false;
3030  default:
3031  VMA_ASSERT(0);
3032  return true;
3033  }
3034 }
3035 
3036 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3037 {
3038  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3039  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3040  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3041  {
3042  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3043  }
3044 }
3045 
3046 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3047 {
3048  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3049  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3050  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3051  {
3052  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3053  {
3054  return false;
3055  }
3056  }
3057  return true;
3058 }
3059 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false the lock is a no-op: m_pMutex stays null and
    // neither Lock() nor Unlock() is called.
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex; // null when locking is disabled
};
3085 
3086 #if VMA_DEBUG_GLOBAL_MUTEX
3087  static VMA_MUTEX gDebugGlobalMutex;
3088  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3089 #else
3090  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3091 #endif
3092 
3093 // Minimum size of a free suballocation to register it in the free suballocation collection.
3094 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3095 
/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    // Classic lower_bound: invariant is that everything before beg+down is
    // < key and everything at/after beg+up is >= key.
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1; // mid element < key: answer lies to the right
        }
        else
        {
            up = mid; // mid element >= key: candidate; keep searching left
        }
    }
    return beg + down;
}
3123 
3125 // Memory allocation
3126 
3127 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3128 {
3129  if((pAllocationCallbacks != VMA_NULL) &&
3130  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3131  {
3132  return (*pAllocationCallbacks->pfnAllocation)(
3133  pAllocationCallbacks->pUserData,
3134  size,
3135  alignment,
3136  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3137  }
3138  else
3139  {
3140  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3141  }
3142 }
3143 
3144 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3145 {
3146  if((pAllocationCallbacks != VMA_NULL) &&
3147  (pAllocationCallbacks->pfnFree != VMA_NULL))
3148  {
3149  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3150  }
3151  else
3152  {
3153  VMA_SYSTEM_FREE(ptr);
3154  }
3155 }
3156 
// Allocates raw, uninitialized storage for a single T (no constructor call).
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
3162 
// Allocates raw, uninitialized storage for (count) elements of T (no
// constructor calls).
// NOTE(review): no overflow check on sizeof(T) * count -- sizes are presumed
// trusted; confirm at call sites.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
3168 
// Allocates storage via VmaAllocate and placement-constructs a single object.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// Allocates storage for (count) elements via VmaAllocateArray; the placement
// new here constructs an object at the start of the block only.
// NOTE(review): remaining elements are not constructed by this macro --
// presumably T is POD or callers construct them separately; confirm.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3172 
// Destroys and frees a single object previously created with vma_new.
// NOTE(review): unlike vma_delete_array there is no null check -- callers are
// presumed to pass a valid pointer.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
3179 
// Destroys (back-to-front) and frees an array created with vma_new_array.
// Safe to call with ptr == VMA_NULL (no-op).
template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        // Destroy elements in reverse index order.
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
3192 
// STL-compatible allocator.
// Minimal Allocator-concept implementation that forwards all allocations to
// VmaAllocateArray/VmaFree with the stored VkAllocationCallbacks.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
3220 
3221 #if VMA_USE_STL_VECTOR
3222 
3223 #define VmaVector std::vector
3224 
// Inserts (item) into (vec) at position (index). Thin wrapper so call sites
// work uniformly with std::vector when VMA_USE_STL_VECTOR is enabled.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
3230 
// Removes the element at position (index) from (vec). Counterpart of
// VmaVectorInsert for the std::vector configuration.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
3236 
3237 #else // #if VMA_USE_STL_VECTOR
3238 
3239 /* Class with interface compatible with subset of std::vector.
3240 T must be POD because constructors and destructors are not called and memcpy is
3241 used for these objects. */
3242 template<typename T, typename AllocatorT>
3243 class VmaVector
3244 {
3245 public:
3246  typedef T value_type;
3247 
3248  VmaVector(const AllocatorT& allocator) :
3249  m_Allocator(allocator),
3250  m_pArray(VMA_NULL),
3251  m_Count(0),
3252  m_Capacity(0)
3253  {
3254  }
3255 
3256  VmaVector(size_t count, const AllocatorT& allocator) :
3257  m_Allocator(allocator),
3258  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3259  m_Count(count),
3260  m_Capacity(count)
3261  {
3262  }
3263 
3264  VmaVector(const VmaVector<T, AllocatorT>& src) :
3265  m_Allocator(src.m_Allocator),
3266  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3267  m_Count(src.m_Count),
3268  m_Capacity(src.m_Count)
3269  {
3270  if(m_Count != 0)
3271  {
3272  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3273  }
3274  }
3275 
3276  ~VmaVector()
3277  {
3278  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3279  }
3280 
3281  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3282  {
3283  if(&rhs != this)
3284  {
3285  resize(rhs.m_Count);
3286  if(m_Count != 0)
3287  {
3288  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3289  }
3290  }
3291  return *this;
3292  }
3293 
3294  bool empty() const { return m_Count == 0; }
3295  size_t size() const { return m_Count; }
3296  T* data() { return m_pArray; }
3297  const T* data() const { return m_pArray; }
3298 
3299  T& operator[](size_t index)
3300  {
3301  VMA_HEAVY_ASSERT(index < m_Count);
3302  return m_pArray[index];
3303  }
3304  const T& operator[](size_t index) const
3305  {
3306  VMA_HEAVY_ASSERT(index < m_Count);
3307  return m_pArray[index];
3308  }
3309 
3310  T& front()
3311  {
3312  VMA_HEAVY_ASSERT(m_Count > 0);
3313  return m_pArray[0];
3314  }
3315  const T& front() const
3316  {
3317  VMA_HEAVY_ASSERT(m_Count > 0);
3318  return m_pArray[0];
3319  }
3320  T& back()
3321  {
3322  VMA_HEAVY_ASSERT(m_Count > 0);
3323  return m_pArray[m_Count - 1];
3324  }
3325  const T& back() const
3326  {
3327  VMA_HEAVY_ASSERT(m_Count > 0);
3328  return m_pArray[m_Count - 1];
3329  }
3330 
3331  void reserve(size_t newCapacity, bool freeMemory = false)
3332  {
3333  newCapacity = VMA_MAX(newCapacity, m_Count);
3334 
3335  if((newCapacity < m_Capacity) && !freeMemory)
3336  {
3337  newCapacity = m_Capacity;
3338  }
3339 
3340  if(newCapacity != m_Capacity)
3341  {
3342  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3343  if(m_Count != 0)
3344  {
3345  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3346  }
3347  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3348  m_Capacity = newCapacity;
3349  m_pArray = newArray;
3350  }
3351  }
3352 
3353  void resize(size_t newCount, bool freeMemory = false)
3354  {
3355  size_t newCapacity = m_Capacity;
3356  if(newCount > m_Capacity)
3357  {
3358  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3359  }
3360  else if(freeMemory)
3361  {
3362  newCapacity = newCount;
3363  }
3364 
3365  if(newCapacity != m_Capacity)
3366  {
3367  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3368  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3369  if(elementsToCopy != 0)
3370  {
3371  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3372  }
3373  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3374  m_Capacity = newCapacity;
3375  m_pArray = newArray;
3376  }
3377 
3378  m_Count = newCount;
3379  }
3380 
3381  void clear(bool freeMemory = false)
3382  {
3383  resize(0, freeMemory);
3384  }
3385 
3386  void insert(size_t index, const T& src)
3387  {
3388  VMA_HEAVY_ASSERT(index <= m_Count);
3389  const size_t oldCount = size();
3390  resize(oldCount + 1);
3391  if(index < oldCount)
3392  {
3393  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3394  }
3395  m_pArray[index] = src;
3396  }
3397 
3398  void remove(size_t index)
3399  {
3400  VMA_HEAVY_ASSERT(index < m_Count);
3401  const size_t oldCount = size();
3402  if(index < oldCount - 1)
3403  {
3404  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3405  }
3406  resize(oldCount - 1);
3407  }
3408 
3409  void push_back(const T& src)
3410  {
3411  const size_t newIndex = size();
3412  resize(newIndex + 1);
3413  m_pArray[newIndex] = src;
3414  }
3415 
3416  void pop_back()
3417  {
3418  VMA_HEAVY_ASSERT(m_Count > 0);
3419  resize(size() - 1);
3420  }
3421 
3422  void push_front(const T& src)
3423  {
3424  insert(0, src);
3425  }
3426 
3427  void pop_front()
3428  {
3429  VMA_HEAVY_ASSERT(m_Count > 0);
3430  remove(0);
3431  }
3432 
3433  typedef T* iterator;
3434 
3435  iterator begin() { return m_pArray; }
3436  iterator end() { return m_pArray + m_Count; }
3437 
3438 private:
3439  AllocatorT m_Allocator;
3440  T* m_pArray;
3441  size_t m_Count;
3442  size_t m_Capacity;
3443 };
3444 
3445 template<typename T, typename allocatorT>
3446 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
3447 {
3448  vec.insert(index, item);
3449 }
3450 
3451 template<typename T, typename allocatorT>
3452 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
3453 {
3454  vec.remove(index);
3455 }
3456 
3457 #endif // #if VMA_USE_STL_VECTOR
3458 
// Inserts value into a vector kept sorted under CmpLess and returns the index
// at which it was placed (first position not less than value).
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const size_t insertIndex = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator) - vector.data();
    VmaVectorInsert(vector, insertIndex, value);
    return insertIndex;
}
3470 
// Removes the first element equivalent to value from a sorted vector.
// Returns true if an element was found and removed, false otherwise.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if(it == vector.end())
    {
        return false;
    }
    // Equivalence under a strict weak ordering: neither precedes the other.
    if(comparator(*it, value) || comparator(value, *it))
    {
        return false;
    }
    const size_t indexToRemove = it - vector.begin();
    VmaVectorRemove(vector, indexToRemove);
    return true;
}
3488 
3489 template<typename CmpLess, typename IterT, typename KeyT>
3490 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3491 {
3492  CmpLess comparator;
3493  typename IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3494  beg, end, value, comparator);
3495  if(it == end ||
3496  !comparator(*it, value) && !comparator(value, *it))
3497  {
3498  return it;
3499  }
3500  return end;
3501 }
3502 
3504 // class VmaPoolAllocator
3505 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Destroys all blocks; any outstanding pointers become dangling.
    void Clear();
    // Returns a pointer to an uninitialized T slot.
    T* Alloc();
    // Returns a slot previously obtained from Alloc() back to its block.
    void Free(T* ptr);

private:
    // A slot is either live (Value) or a link in its block's free list
    // (NextFreeIndex).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One contiguous array of m_ItemsPerBlock items plus the head index of
    // its free list (UINT32_MAX when the block is full).
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    // Appends a new block with all items chained into its free list.
    ItemBlock& CreateNewBlock();
};
3541 
// Stores callbacks and block size; no memory is allocated until first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
3550 
// Releases all item blocks. Does not run destructors of live T objects
// individually - callers are expected to have freed their items.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
3556 
3557 template<typename T>
3558 void VmaPoolAllocator<T>::Clear()
3559 {
3560  for(size_t i = m_ItemBlocks.size(); i--; )
3561  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3562  m_ItemBlocks.clear();
3563 }
3564 
3565 template<typename T>
3566 T* VmaPoolAllocator<T>::Alloc()
3567 {
3568  for(size_t i = m_ItemBlocks.size(); i--; )
3569  {
3570  ItemBlock& block = m_ItemBlocks[i];
3571  // This block has some free items: Use first one.
3572  if(block.FirstFreeIndex != UINT32_MAX)
3573  {
3574  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3575  block.FirstFreeIndex = pItem->NextFreeIndex;
3576  return &pItem->Value;
3577  }
3578  }
3579 
3580  // No block has free item: Create new one and use it.
3581  ItemBlock& newBlock = CreateNewBlock();
3582  Item* const pItem = &newBlock.pItems[0];
3583  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3584  return &pItem->Value;
3585 }
3586 
3587 template<typename T>
3588 void VmaPoolAllocator<T>::Free(T* ptr)
3589 {
3590  // Search all memory blocks to find ptr.
3591  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3592  {
3593  ItemBlock& block = m_ItemBlocks[i];
3594 
3595  // Casting to union.
3596  Item* pItemPtr;
3597  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3598 
3599  // Check if pItemPtr is in address range of this block.
3600  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3601  {
3602  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3603  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3604  block.FirstFreeIndex = index;
3605  return;
3606  }
3607  }
3608  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3609 }
3610 
3611 template<typename T>
3612 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3613 {
3614  ItemBlock newBlock = {
3615  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3616 
3617  m_ItemBlocks.push_back(newBlock);
3618 
3619  // Setup singly-linked list of all free items in this block.
3620  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3621  newBlock.pItems[i].NextFreeIndex = i + 1;
3622  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3623  return m_ItemBlocks.back();
3624 }
3625 
3627 // class VmaRawList, VmaList
3628 
3629 #if VMA_USE_STL_LIST
3630 
3631 #define VmaList std::list
3632 
3633 #else // #if VMA_USE_STL_LIST
3634 
// Node of VmaRawList: intrusive prev/next links plus the stored value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the front item.
    VmaListItem* pNext; // Null for the back item.
    T Value;
};
3642 
// Doubly linked list.
// Nodes come from an internal VmaPoolAllocator (128 items per block), so
// insertions and removals avoid hitting the allocation callbacks directly.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Overloads without a value leave the new item's Value uninitialized.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool of list nodes.
    ItemType* m_pFront; // Null when the list is empty.
    ItemType* m_pBack;  // Null when the list is empty.
    size_t m_Count;
};
3687 
// Creates an empty list. Nodes are pool-allocated in blocks of 128.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
3697 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's own destructor releases all node memory.
}
3704 
3705 template<typename T>
3706 void VmaRawList<T>::Clear()
3707 {
3708  if(IsEmpty() == false)
3709  {
3710  ItemType* pItem = m_pBack;
3711  while(pItem != VMA_NULL)
3712  {
3713  ItemType* const pPrevItem = pItem->pPrev;
3714  m_ItemAllocator.Free(pItem);
3715  pItem = pPrevItem;
3716  }
3717  m_pFront = VMA_NULL;
3718  m_pBack = VMA_NULL;
3719  m_Count = 0;
3720  }
3721 }
3722 
3723 template<typename T>
3724 VmaListItem<T>* VmaRawList<T>::PushBack()
3725 {
3726  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3727  pNewItem->pNext = VMA_NULL;
3728  if(IsEmpty())
3729  {
3730  pNewItem->pPrev = VMA_NULL;
3731  m_pFront = pNewItem;
3732  m_pBack = pNewItem;
3733  m_Count = 1;
3734  }
3735  else
3736  {
3737  pNewItem->pPrev = m_pBack;
3738  m_pBack->pNext = pNewItem;
3739  m_pBack = pNewItem;
3740  ++m_Count;
3741  }
3742  return pNewItem;
3743 }
3744 
3745 template<typename T>
3746 VmaListItem<T>* VmaRawList<T>::PushFront()
3747 {
3748  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3749  pNewItem->pPrev = VMA_NULL;
3750  if(IsEmpty())
3751  {
3752  pNewItem->pNext = VMA_NULL;
3753  m_pFront = pNewItem;
3754  m_pBack = pNewItem;
3755  m_Count = 1;
3756  }
3757  else
3758  {
3759  pNewItem->pNext = m_pFront;
3760  m_pFront->pPrev = pNewItem;
3761  m_pFront = pNewItem;
3762  ++m_Count;
3763  }
3764  return pNewItem;
3765 }
3766 
3767 template<typename T>
3768 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3769 {
3770  ItemType* const pNewItem = PushBack();
3771  pNewItem->Value = value;
3772  return pNewItem;
3773 }
3774 
3775 template<typename T>
3776 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3777 {
3778  ItemType* const pNewItem = PushFront();
3779  pNewItem->Value = value;
3780  return pNewItem;
3781 }
3782 
3783 template<typename T>
3784 void VmaRawList<T>::PopBack()
3785 {
3786  VMA_HEAVY_ASSERT(m_Count > 0);
3787  ItemType* const pBackItem = m_pBack;
3788  ItemType* const pPrevItem = pBackItem->pPrev;
3789  if(pPrevItem != VMA_NULL)
3790  {
3791  pPrevItem->pNext = VMA_NULL;
3792  }
3793  m_pBack = pPrevItem;
3794  m_ItemAllocator.Free(pBackItem);
3795  --m_Count;
3796 }
3797 
3798 template<typename T>
3799 void VmaRawList<T>::PopFront()
3800 {
3801  VMA_HEAVY_ASSERT(m_Count > 0);
3802  ItemType* const pFrontItem = m_pFront;
3803  ItemType* const pNextItem = pFrontItem->pNext;
3804  if(pNextItem != VMA_NULL)
3805  {
3806  pNextItem->pPrev = VMA_NULL;
3807  }
3808  m_pFront = pNextItem;
3809  m_ItemAllocator.Free(pFrontItem);
3810  --m_Count;
3811 }
3812 
// Unlinks pItem from the list and returns its node to the pool allocator.
// pItem must be a live member of this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix forward link of the predecessor, or the list head.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix backward link of the successor, or the list tail.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
3842 
// Inserts a new node (Value uninitialized) before pItem and returns it.
// pItem can be null - it means PushBack (insert at the end).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // pItem was the front item - the new node becomes the front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
3868 
// Inserts a new node (Value uninitialized) after pItem and returns it.
// pItem can be null - it means PushFront (insert at the beginning).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // pItem was the back item - the new node becomes the back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
3894 
3895 template<typename T>
3896 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3897 {
3898  ItemType* const newItem = InsertBefore(pItem);
3899  newItem->Value = value;
3900  return newItem;
3901 }
3902 
3903 template<typename T>
3904 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3905 {
3906  ItemType* const newItem = InsertAfter(pItem);
3907  newItem->Value = value;
3908  return newItem;
3909 }
3910 
// STL-compatible doubly linked list built on VmaRawList.
// AllocatorT must provide m_pCallbacks (e.g. VmaStlAllocator).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Bidirectional iterator. end() is represented by a null m_pItem together
    // with a valid m_pList pointer.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): move to the last item.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparing iterators from different lists is a program error.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        // Only VmaList can create a non-default-constructed iterator.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Const counterpart of iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): move to the last item.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        // Only VmaList can create a non-default-constructed const_iterator.
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before position it; returns iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
4095 
4096 #endif // #if VMA_USE_STL_LIST
4097 
4099 // class VmaMap
4100 
4101 // Unused in this version.
4102 #if 0
4103 
4104 #if VMA_USE_STL_UNORDERED_MAP
4105 
4106 #define VmaPair std::pair
4107 
4108 #define VMA_MAP_TYPE(KeyT, ValueT) \
4109  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4110 
4111 #else // #if VMA_USE_STL_UNORDERED_MAP
4112 
// Minimal replacement for std::pair used by VmaMap.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
4122 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Despite the unordered_map-like interface, pairs are kept sorted by key
    // and lookup is a binary search over this vector.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
4145 
4146 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4147 
// Orders VmaPairs by their first member; the heterogeneous overload allows
// binary search directly against a key.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
4160 
4161 template<typename KeyT, typename ValueT>
4162 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4163 {
4164  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4165  m_Vector.data(),
4166  m_Vector.data() + m_Vector.size(),
4167  pair,
4168  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4169  VmaVectorInsert(m_Vector, indexToInsert, pair);
4170 }
4171 
4172 template<typename KeyT, typename ValueT>
4173 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4174 {
4175  PairType* it = VmaBinaryFindFirstNotLess(
4176  m_Vector.data(),
4177  m_Vector.data() + m_Vector.size(),
4178  key,
4179  VmaPairFirstLess<KeyT, ValueT>());
4180  if((it != m_Vector.end()) && (it->first == key))
4181  {
4182  return it;
4183  }
4184  else
4185  {
4186  return m_Vector.end();
4187  }
4188 }
4189 
4190 template<typename KeyT, typename ValueT>
4191 void VmaMap<KeyT, ValueT>::erase(iterator it)
4192 {
4193  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4194 }
4195 
4196 #endif // #if VMA_USE_STL_UNORDERED_MAP
4197 
4198 #endif // #if 0
4199 
4201 
4202 class VmaDeviceMemoryBlock;
4203 
4204 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4205 
// Represents a single allocation made by the library: either a region inside
// a VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or a dedicated
// VkDeviceMemory (ALLOCATION_TYPE_DEDICATED). An object starts as
// ALLOCATION_TYPE_NONE and is initialized exactly once by one of the Init*
// methods below.
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit of m_MapCount set when the allocation was created persistently
    // mapped (VMA_ALLOCATION_CREATE_MAPPED_BIT).
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // Set when m_pUserData points to a string owned by this allocation.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Only the persistent-map bit may remain; user map count must be 0.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this allocation as a region of a device memory block.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this allocation as already lost: block-type, but with a
    // null block and VMA_FRAME_INDEX_LOST as its last-use frame index.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated
    // allocation (one block, one allocation, no unused ranges).
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Which member is active is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
4422 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;        // Offset of this region from the start of the block, in bytes.
    VkDeviceSize size;          // Size of this region, in bytes.
    VmaAllocation hAllocation;  // Allocation occupying this region; null when the region is free.
    VmaSuballocationType type;  // Kind of region (free, or the type of resource placed here).
};
4434 
4435 // Comparator for offsets.
4436 struct VmaSuballocationOffsetLess
4437 {
4438  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4439  {
4440  return lhs.offset < rhs.offset;
4441  }
4442 };
4443 struct VmaSuballocationOffsetGreater
4444 {
4445  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4446  {
4447  return lhs.offset > rhs.offset;
4448  }
4449 };
4450 
4451 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4452 
4453 // Cost of one additional allocation lost, as equivalent in bytes.
4454 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4455 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;      // Proposed offset of the new allocation inside the block.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;

    // Heuristic cost of fulfilling this request: bytes of live allocations that
    // would be sacrificed, plus a fixed per-allocation penalty
    // (VMA_LOST_ALLOCATION_COST) for every allocation made lost.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
4482 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class. Concrete strategies below are VmaBlockMetadata_Generic
(free-list based) and VmaBlockMetadata_Linear (linear/ring-buffer/double-stack).
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata() : m_Size(0) { }
    virtual ~VmaBlockMetadata() { }
    // Must be called once, right after construction, with the block size.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    // Fills outInfo with statistics describing this block's allocations and free ranges.
    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Accumulates this block's statistics into inoutStats.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Marks as lost the allocations required by pAllocationRequest, if still possible.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost every eligible allocation in this block; returns how many were
    // made lost (presumed from signature - confirm in implementations).
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // Scans the mapped block data for corruption of the debug margins written
    // around allocations (see VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation).
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    // Frees the suballocation that starts at the given offset.
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
#if VMA_STATS_STRING_ENABLED
    // Helpers for emitting the common parts of the detailed JSON map.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size; // Total size of the block, in bytes. Set by Init().
};
4563 
/*
General-purpose block metadata: suballocations (used and free) are kept in a
list ordered by offset, with free ones above a size threshold additionally
indexed by size for best-fit searches.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Every list element is either an allocation or a free range, so the
    // allocation count is total element count minus free count.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    // Number of free suballocations currently in m_Suballocations.
    uint32_t m_FreeCount;
    // Sum of sizes of all free suballocations, cached for O(1) queries.
    VkDeviceSize m_SumFreeSize;
    // All suballocations, used and free.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
4653 
4654 /*
4655 Allocations and their references in internal data structure look like this:
4656 
4657 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4658 
4659  0 +-------+
4660  | |
4661  | |
4662  | |
4663  +-------+
4664  | Alloc | 1st[m_1stNullItemsBeginCount]
4665  +-------+
4666  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4667  +-------+
4668  | ... |
4669  +-------+
4670  | Alloc | 1st[1st.size() - 1]
4671  +-------+
4672  | |
4673  | |
4674  | |
4675 GetSize() +-------+
4676 
4677 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4678 
4679  0 +-------+
4680  | Alloc | 2nd[0]
4681  +-------+
4682  | Alloc | 2nd[1]
4683  +-------+
4684  | ... |
4685  +-------+
4686  | Alloc | 2nd[2nd.size() - 1]
4687  +-------+
4688  | |
4689  | |
4690  | |
4691  +-------+
4692  | Alloc | 1st[m_1stNullItemsBeginCount]
4693  +-------+
4694  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4695  +-------+
4696  | ... |
4697  +-------+
4698  | Alloc | 1st[1st.size() - 1]
4699  +-------+
4700  | |
4701 GetSize() +-------+
4702 
4703 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4704 
4705  0 +-------+
4706  | |
4707  | |
4708  | |
4709  +-------+
4710  | Alloc | 1st[m_1stNullItemsBeginCount]
4711  +-------+
4712  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4713  +-------+
4714  | ... |
4715  +-------+
4716  | Alloc | 1st[1st.size() - 1]
4717  +-------+
4718  | |
4719  | |
4720  | |
4721  +-------+
4722  | Alloc | 2nd[2nd.size() - 1]
4723  +-------+
4724  | ... |
4725  +-------+
4726  | Alloc | 2nd[1]
4727  +-------+
4728  | Alloc | 2nd[0]
4729 GetSize() +-------+
4730 
4731 */
// Block metadata for the linear algorithm. Supports three layouts of the two
// suballocation vectors - free-at-end, ring buffer and double stack - as
// illustrated in the diagram comment directly above this class.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        // 2nd vector is not in use.
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize; // Cached sum of free bytes in the block.
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;  // 0 or 1 - selects which of the two vectors currently plays the "1st" role.
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    // Heuristic: whether enough null items have accumulated in 1st to make compaction worthwhile.
    bool ShouldCompact1st() const;
    // Removes null items and swaps the vectors' roles as needed after a Free().
    void CleanupAfterFree();
};
4829 
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.

Thread-safety: This class must be externally synchronized.
*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of this block's suballocations. Concrete metadata type depends
    // on the linearAlgorithm flag passed to Init().
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        bool linearAlgorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the block's memory, reference-counted via m_MapCount (count is the
    // number of references to add). ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write / validate magic values around [allocOffset, allocOffset + allocSize)
    // for corruption detection.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id; // Unique id assigned by the owning block vector (see Init()).
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;   // Reference counter for Map()/Unmap().
    void* m_pMappedData;   // Not null while the block's memory is mapped.
};
4898 
// Orders raw pointers by address, e.g. for keeping vectors of handles sorted.
//
// The pointers compared here generally do not point into the same array, and
// applying the built-in '<' to unrelated pointers yields an unspecified result
// in C++. Converting to uintptr_t first (uintptr_t is already in scope via the
// <stdint.h> types used throughout this file) gives a consistent total order
// on all supported platforms, matching the guarantee std::less provides.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        return reinterpret_cast<uintptr_t>(lhs) < reinterpret_cast<uintptr_t>(rhs);
    }
};
4906 
4907 class VmaDefragmentator;
4908 
/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool linearAlgorithm);
    ~VmaBlockVector();

    // Preallocates blocks up to m_MinBlockCount.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    bool UsesLinearAlgorithm() const { return m_LinearAlgorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates from one of the existing blocks or from a newly created one,
    // according to createInfo.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    // Makes lost all eligible allocations in this vector's blocks;
    // pLostAllocationCount can be used to retrieve the number made lost.
    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates the defragmentator for this vector (at most one exists).
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    // Budgets are passed by reference - presumably updated with the remaining
    // amounts as work is done; confirm in the implementation.
    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_LinearAlgorithm;
    /* Hysteresis: at most one completely empty block is kept alive, to avoid
    the pessimistic case of alternating creation and destruction of a
    VkDeviceMemory when allocations come and go. */
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId; // Source of unique ids passed to VmaDeviceMemoryBlock::Init().

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
5012 
// Custom memory pool: owns a dedicated VmaBlockVector configured from
// VmaPoolCreateInfo, plus an id used for reporting/recording.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // The id may be assigned only once - asserts it was not set before.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id; // 0 until assigned via SetId().
};
5034 
/*
Performs defragmentation of a single VmaBlockVector: candidate allocations are
registered via AddAllocation(), then Defragment() moves them between blocks,
bounded by byte/allocation budgets.
*/
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    VkDeviceSize m_BytesMoved;    // Total bytes moved so far; see GetBytesMoved().
    uint32_t m_AllocationsMoved;  // Total allocations moved so far; see GetAllocationsMoved().

    // An allocation registered for defragmentation, plus an optional out-flag
    // used to report whether it was moved.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by descending allocation size (largest first).
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state for one defragmentation run.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when not all of its allocations
        // were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): "Descecnding" is a typo for "Descending"; the name is
        // kept unchanged to avoid breaking callers.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        // Returns the block's mapped pointer, mapping the block first if needed.
        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo by underlying block pointer. The mixed overload allows
    // lookups keyed by a raw VmaDeviceMemoryBlock pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // One pass of moving allocations, bounded by the given budgets.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    // Heuristic filter rejecting moves that would not improve the layout.
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving. pChanged, if not null,
    // will be used to report whether the allocation was actually moved.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
5164 
5165 #if VMA_RECORDING_ENABLED
5166 
/*
Records VMA function calls, with their parameters, frame index and timestamps,
into a file configured via VmaRecordSettings - presumably for later replay or
offline analysis (confirm against the recording file format documentation).
*/
class VmaRecorder
{
public:
    VmaRecorder();
    // Opens the output file and initializes timing. useMutex enables locking of
    // the file for use from multiple threads.
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a preamble describing the device, its memory properties, and
    // whether the dedicated-allocation extension is enabled.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // Each Record* method below logs the corresponding VMA API call.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Identifies one recorded call: the calling thread and the time elapsed
    // since recording started.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Renders a pUserData value as a printable string: either the user-provided
    // string itself or the pointer value formatted into m_PtrStr, depending on
    // allocFlags.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];  // Room for a pointer as 16 hex digits + terminating NUL.
        const char* m_Str;  // Points either into m_PtrStr or at the user's string.
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;   // Guards m_File when m_UseMutex is set.
    // Timer frequency and counter value at recording start, used to compute
    // CallParams::time - exact timer source is platform-specific (confirm in
    // GetBasicParams implementation).
    int64_t m_Freq;
    int64_t m_StartCounter;

    // Fills outParams with the current thread id and elapsed time.
    void GetBasicParams(CallParams& outParams);
    void Flush();
};
5262 
5263 #endif // #if VMA_RECORDING_ENABLED
5264 
5265 // Main allocator object.
5266 struct VmaAllocator_T
5267 {
5268  VMA_CLASS_NO_COPY(VmaAllocator_T)
5269 public:
5270  bool m_UseMutex;
5271  bool m_UseKhrDedicatedAllocation;
5272  VkDevice m_hDevice;
5273  bool m_AllocationCallbacksSpecified;
5274  VkAllocationCallbacks m_AllocationCallbacks;
5275  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5276 
5277  // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
5278  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5279  VMA_MUTEX m_HeapSizeLimitMutex;
5280 
5281  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5282  VkPhysicalDeviceMemoryProperties m_MemProps;
5283 
5284  // Default pools.
5285  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5286 
5287  // Each vector is sorted by memory (handle value).
5288  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5289  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5290  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5291 
5292  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5293  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5294  ~VmaAllocator_T();
5295 
5296  const VkAllocationCallbacks* GetAllocationCallbacks() const
5297  {
5298  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5299  }
5300  const VmaVulkanFunctions& GetVulkanFunctions() const
5301  {
5302  return m_VulkanFunctions;
5303  }
5304 
    // Granularity to respect between resources of different kinds placed in the
    // same VkDeviceMemory block: the device's bufferImageGranularity limit,
    // optionally raised by the VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY override.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    // Maps a Vulkan memory type index to the index of the heap it belongs to.
    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    // Such memory requires explicit flush/invalidate by the host.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // For non-coherent memory it is raised to nonCoherentAtomSize so that
    // flush/invalidate ranges of neighboring allocations cannot overlap.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    // True for integrated GPUs, where device-local and host memory are
    // typically the same physical memory.
    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }
5338 
5339 #if VMA_RECORDING_ENABLED
5340  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5341 #endif
5342 
5343  void GetBufferMemoryRequirements(
5344  VkBuffer hBuffer,
5345  VkMemoryRequirements& memReq,
5346  bool& requiresDedicatedAllocation,
5347  bool& prefersDedicatedAllocation) const;
5348  void GetImageMemoryRequirements(
5349  VkImage hImage,
5350  VkMemoryRequirements& memReq,
5351  bool& requiresDedicatedAllocation,
5352  bool& prefersDedicatedAllocation) const;
5353 
5354  // Main allocation function.
5355  VkResult AllocateMemory(
5356  const VkMemoryRequirements& vkMemReq,
5357  bool requiresDedicatedAllocation,
5358  bool prefersDedicatedAllocation,
5359  VkBuffer dedicatedBuffer,
5360  VkImage dedicatedImage,
5361  const VmaAllocationCreateInfo& createInfo,
5362  VmaSuballocationType suballocType,
5363  VmaAllocation* pAllocation);
5364 
5365  // Main deallocation function.
5366  void FreeMemory(const VmaAllocation allocation);
5367 
5368  void CalculateStats(VmaStats* pStats);
5369 
5370 #if VMA_STATS_STRING_ENABLED
5371  void PrintDetailedMap(class VmaJsonWriter& json);
5372 #endif
5373 
5374  VkResult Defragment(
5375  VmaAllocation* pAllocations,
5376  size_t allocationCount,
5377  VkBool32* pAllocationsChanged,
5378  const VmaDefragmentationInfo* pDefragmentationInfo,
5379  VmaDefragmentationStats* pDefragmentationStats);
5380 
5381  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5382  bool TouchAllocation(VmaAllocation hAllocation);
5383 
5384  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5385  void DestroyPool(VmaPool pool);
5386  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5387 
5388  void SetCurrentFrameIndex(uint32_t frameIndex);
5389  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5390 
5391  void MakePoolAllocationsLost(
5392  VmaPool hPool,
5393  size_t* pLostAllocationCount);
5394  VkResult CheckPoolCorruption(VmaPool hPool);
5395  VkResult CheckCorruption(uint32_t memoryTypeBits);
5396 
5397  void CreateLostAllocation(VmaAllocation* pAllocation);
5398 
5399  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5400  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5401 
5402  VkResult Map(VmaAllocation hAllocation, void** ppData);
5403  void Unmap(VmaAllocation hAllocation);
5404 
5405  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5406  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5407 
5408  void FlushOrInvalidateAllocation(
5409  VmaAllocation hAllocation,
5410  VkDeviceSize offset, VkDeviceSize size,
5411  VMA_CACHE_OPERATION op);
5412 
5413  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5414 
5415 private:
5416  VkDeviceSize m_PreferredLargeHeapBlockSize;
5417 
5418  VkPhysicalDevice m_PhysicalDevice;
5419  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5420 
5421  VMA_MUTEX m_PoolsMutex;
5422  // Protected by m_PoolsMutex. Sorted by pointer value.
5423  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5424  uint32_t m_NextPoolId;
5425 
5426  VmaVulkanFunctions m_VulkanFunctions;
5427 
5428 #if VMA_RECORDING_ENABLED
5429  VmaRecorder* m_pRecorder;
5430 #endif
5431 
5432  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5433 
5434  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5435 
5436  VkResult AllocateMemoryOfType(
5437  VkDeviceSize size,
5438  VkDeviceSize alignment,
5439  bool dedicatedAllocation,
5440  VkBuffer dedicatedBuffer,
5441  VkImage dedicatedImage,
5442  const VmaAllocationCreateInfo& createInfo,
5443  uint32_t memTypeIndex,
5444  VmaSuballocationType suballocType,
5445  VmaAllocation* pAllocation);
5446 
5447  // Allocates and registers new VkDeviceMemory specifically for single allocation.
5448  VkResult AllocateDedicatedMemory(
5449  VkDeviceSize size,
5450  VmaSuballocationType suballocType,
5451  uint32_t memTypeIndex,
5452  bool map,
5453  bool isUserDataString,
5454  void* pUserData,
5455  VkBuffer dedicatedBuffer,
5456  VkImage dedicatedImage,
5457  VmaAllocation* pAllocation);
5458 
    // Unregisters the given allocation from the dedicated-allocations list and frees its memory.
5460  void FreeDedicatedMemory(VmaAllocation allocation);
5461 };
5462 
5464 // Memory allocation #2 after VmaAllocator_T definition
5465 
5466 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5467 {
5468  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5469 }
5470 
5471 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5472 {
5473  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5474 }
5475 
5476 template<typename T>
5477 static T* VmaAllocate(VmaAllocator hAllocator)
5478 {
5479  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5480 }
5481 
5482 template<typename T>
5483 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5484 {
5485  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5486 }
5487 
5488 template<typename T>
5489 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5490 {
5491  if(ptr != VMA_NULL)
5492  {
5493  ptr->~T();
5494  VmaFree(hAllocator, ptr);
5495  }
5496 }
5497 
5498 template<typename T>
5499 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5500 {
5501  if(ptr != VMA_NULL)
5502  {
5503  for(size_t i = count; i--; )
5504  ptr[i].~T();
5505  VmaFree(hAllocator, ptr);
5506  }
5507 }
5508 
5510 // VmaStringBuilder
5511 
5512 #if VMA_STATS_STRING_ENABLED
5513 
/*
Minimal growable text buffer used to build the statistics/JSON strings.
Memory comes from the allocator's CPU allocation callbacks. The stored
character data is NOT null-terminated; use GetLength() with GetData().
*/
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    // Raw character storage; not null-terminated.
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
5531 
5532 void VmaStringBuilder::Add(const char* pStr)
5533 {
5534  const size_t strLen = strlen(pStr);
5535  if(strLen > 0)
5536  {
5537  const size_t oldCount = m_Data.size();
5538  m_Data.resize(oldCount + strLen);
5539  memcpy(m_Data.data() + oldCount, pStr, strLen);
5540  }
5541 }
5542 
// Appends the decimal representation of a 32-bit unsigned number.
void VmaStringBuilder::AddNumber(uint32_t num)
{
    char buf[11]; // Max 10 decimal digits for uint32_t + terminating null.
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}

// Appends the decimal representation of a 64-bit unsigned number.
void VmaStringBuilder::AddNumber(uint64_t num)
{
    char buf[21]; // Max 20 decimal digits for uint64_t + terminating null.
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}

// Appends the textual representation of a pointer value.
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
5563 
5564 #endif // #if VMA_STATS_STRING_ENABLED
5565 
5567 // VmaJsonWriter
5568 
5569 #if VMA_STATS_STRING_ENABLED
5570 
/*
Streaming JSON writer used to produce the statistics string.
Keeps a stack of currently open objects/arrays so it can emit commas,
colons and indentation correctly. Usage errors (mismatched Begin/End,
writing while a string value is still open) are caught via VMA_ASSERT.
*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine == true suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted string value (or object key).
    void WriteString(const char* pStr);
    // Alternative API to build one string value out of several parts.
    // Between BeginString() and EndString() only ContinueString* may be used.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values written so far. In objects, keys count as values,
        // so an even count means "a key comes next".
        uint32_t valueCount;
        bool singleLineMode;
    };

    // Output sink.
    VmaStringBuilder& m_SB;
    // Stack of currently open collections, innermost last.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString() and EndString().
    bool m_InsideString;

    // Emits the separator (comma/colon) and indentation before a new value.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
5619 
// One indentation level in pretty-printed output.
const char* const VmaJsonWriter::INDENT = " ";

VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}

VmaJsonWriter::~VmaJsonWriter()
{
    // All strings and collections must have been properly closed by now.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
5634 
5635 void VmaJsonWriter::BeginObject(bool singleLine)
5636 {
5637  VMA_ASSERT(!m_InsideString);
5638 
5639  BeginValue(false);
5640  m_SB.Add('{');
5641 
5642  StackItem item;
5643  item.type = COLLECTION_TYPE_OBJECT;
5644  item.valueCount = 0;
5645  item.singleLineMode = singleLine;
5646  m_Stack.push_back(item);
5647 }
5648 
5649 void VmaJsonWriter::EndObject()
5650 {
5651  VMA_ASSERT(!m_InsideString);
5652 
5653  WriteIndent(true);
5654  m_SB.Add('}');
5655 
5656  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
5657  m_Stack.pop_back();
5658 }
5659 
5660 void VmaJsonWriter::BeginArray(bool singleLine)
5661 {
5662  VMA_ASSERT(!m_InsideString);
5663 
5664  BeginValue(false);
5665  m_SB.Add('[');
5666 
5667  StackItem item;
5668  item.type = COLLECTION_TYPE_ARRAY;
5669  item.valueCount = 0;
5670  item.singleLineMode = singleLine;
5671  m_Stack.push_back(item);
5672 }
5673 
5674 void VmaJsonWriter::EndArray()
5675 {
5676  VMA_ASSERT(!m_InsideString);
5677 
5678  WriteIndent(true);
5679  m_SB.Add(']');
5680 
5681  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
5682  m_Stack.pop_back();
5683 }
5684 
5685 void VmaJsonWriter::WriteString(const char* pStr)
5686 {
5687  BeginString(pStr);
5688  EndString();
5689 }
5690 
5691 void VmaJsonWriter::BeginString(const char* pStr)
5692 {
5693  VMA_ASSERT(!m_InsideString);
5694 
5695  BeginValue(true);
5696  m_SB.Add('"');
5697  m_InsideString = true;
5698  if(pStr != VMA_NULL && pStr[0] != '\0')
5699  {
5700  ContinueString(pStr);
5701  }
5702 }
5703 
5704 void VmaJsonWriter::ContinueString(const char* pStr)
5705 {
5706  VMA_ASSERT(m_InsideString);
5707 
5708  const size_t strLen = strlen(pStr);
5709  for(size_t i = 0; i < strLen; ++i)
5710  {
5711  char ch = pStr[i];
5712  if(ch == '\\')
5713  {
5714  m_SB.Add("\\\\");
5715  }
5716  else if(ch == '"')
5717  {
5718  m_SB.Add("\\\"");
5719  }
5720  else if(ch >= 32)
5721  {
5722  m_SB.Add(ch);
5723  }
5724  else switch(ch)
5725  {
5726  case '\b':
5727  m_SB.Add("\\b");
5728  break;
5729  case '\f':
5730  m_SB.Add("\\f");
5731  break;
5732  case '\n':
5733  m_SB.Add("\\n");
5734  break;
5735  case '\r':
5736  m_SB.Add("\\r");
5737  break;
5738  case '\t':
5739  m_SB.Add("\\t");
5740  break;
5741  default:
5742  VMA_ASSERT(0 && "Character not currently supported.");
5743  break;
5744  }
5745  }
5746 }
5747 
// Appends a decimal number to the string value currently being built.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a decimal number to the string value currently being built.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a pointer value to the string currently being built.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
5765 
5766 void VmaJsonWriter::EndString(const char* pStr)
5767 {
5768  VMA_ASSERT(m_InsideString);
5769  if(pStr != VMA_NULL && pStr[0] != '\0')
5770  {
5771  ContinueString(pStr);
5772  }
5773  m_SB.Add('"');
5774  m_InsideString = false;
5775 }
5776 
5777 void VmaJsonWriter::WriteNumber(uint32_t n)
5778 {
5779  VMA_ASSERT(!m_InsideString);
5780  BeginValue(false);
5781  m_SB.AddNumber(n);
5782 }
5783 
5784 void VmaJsonWriter::WriteNumber(uint64_t n)
5785 {
5786  VMA_ASSERT(!m_InsideString);
5787  BeginValue(false);
5788  m_SB.AddNumber(n);
5789 }
5790 
5791 void VmaJsonWriter::WriteBool(bool b)
5792 {
5793  VMA_ASSERT(!m_InsideString);
5794  BeginValue(false);
5795  m_SB.Add(b ? "true" : "false");
5796 }
5797 
5798 void VmaJsonWriter::WriteNull()
5799 {
5800  VMA_ASSERT(!m_InsideString);
5801  BeginValue(false);
5802  m_SB.Add("null");
5803 }
5804 
5805 void VmaJsonWriter::BeginValue(bool isString)
5806 {
5807  if(!m_Stack.empty())
5808  {
5809  StackItem& currItem = m_Stack.back();
5810  if(currItem.type == COLLECTION_TYPE_OBJECT &&
5811  currItem.valueCount % 2 == 0)
5812  {
5813  VMA_ASSERT(isString);
5814  }
5815 
5816  if(currItem.type == COLLECTION_TYPE_OBJECT &&
5817  currItem.valueCount % 2 != 0)
5818  {
5819  m_SB.Add(": ");
5820  }
5821  else if(currItem.valueCount > 0)
5822  {
5823  m_SB.Add(", ");
5824  WriteIndent();
5825  }
5826  else
5827  {
5828  WriteIndent();
5829  }
5830  ++currItem.valueCount;
5831  }
5832 }
5833 
5834 void VmaJsonWriter::WriteIndent(bool oneLess)
5835 {
5836  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
5837  {
5838  m_SB.AddNewLine();
5839 
5840  size_t count = m_Stack.size();
5841  if(count > 0 && oneLess)
5842  {
5843  --count;
5844  }
5845  for(size_t i = 0; i < count; ++i)
5846  {
5847  m_SB.Add(INDENT);
5848  }
5849  }
5850 }
5851 
5852 #endif // #if VMA_STATS_STRING_ENABLED
5853 
5855 
5856 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
5857 {
5858  if(IsUserDataString())
5859  {
5860  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
5861 
5862  FreeUserDataString(hAllocator);
5863 
5864  if(pUserData != VMA_NULL)
5865  {
5866  const char* const newStrSrc = (char*)pUserData;
5867  const size_t newStrLen = strlen(newStrSrc);
5868  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
5869  memcpy(newStrDst, newStrSrc, newStrLen + 1);
5870  m_pUserData = newStrDst;
5871  }
5872  }
5873  else
5874  {
5875  m_pUserData = pUserData;
5876  }
5877 }
5878 
// Rebinds this block allocation to a different block/offset.
// If the allocation is currently mapped, its mapping reference count is
// moved from the old block to the new one so both keep correct map counts.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount; // Persistent mapping counts as one extra reference.
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
5900 
5901 VkDeviceSize VmaAllocation_T::GetOffset() const
5902 {
5903  switch(m_Type)
5904  {
5905  case ALLOCATION_TYPE_BLOCK:
5906  return m_BlockAllocation.m_Offset;
5907  case ALLOCATION_TYPE_DEDICATED:
5908  return 0;
5909  default:
5910  VMA_ASSERT(0);
5911  return 0;
5912  }
5913 }
5914 
5915 VkDeviceMemory VmaAllocation_T::GetMemory() const
5916 {
5917  switch(m_Type)
5918  {
5919  case ALLOCATION_TYPE_BLOCK:
5920  return m_BlockAllocation.m_Block->GetDeviceMemory();
5921  case ALLOCATION_TYPE_DEDICATED:
5922  return m_DedicatedAllocation.m_hMemory;
5923  default:
5924  VMA_ASSERT(0);
5925  return VK_NULL_HANDLE;
5926  }
5927 }
5928 
5929 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
5930 {
5931  switch(m_Type)
5932  {
5933  case ALLOCATION_TYPE_BLOCK:
5934  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
5935  case ALLOCATION_TYPE_DEDICATED:
5936  return m_DedicatedAllocation.m_MemoryTypeIndex;
5937  default:
5938  VMA_ASSERT(0);
5939  return UINT32_MAX;
5940  }
5941 }
5942 
// Returns a pointer to the mapped CPU memory of this allocation,
// or VMA_NULL when it is not currently mapped.
void* VmaAllocation_T::GetMappedData() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        if(m_MapCount != 0)
        {
            // The block is mapped as a whole; add this allocation's offset.
            void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
            VMA_ASSERT(pBlockData != VMA_NULL);
            return (char*)pBlockData + m_BlockAllocation.m_Offset;
        }
        else
        {
            return VMA_NULL;
        }
        break;
    case ALLOCATION_TYPE_DEDICATED:
        // Cached mapped pointer and map count must agree.
        VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
        return m_DedicatedAllocation.m_pMappedData;
    default:
        VMA_ASSERT(0);
        return VMA_NULL;
    }
}
5967 
5968 bool VmaAllocation_T::CanBecomeLost() const
5969 {
5970  switch(m_Type)
5971  {
5972  case ALLOCATION_TYPE_BLOCK:
5973  return m_BlockAllocation.m_CanBecomeLost;
5974  case ALLOCATION_TYPE_DEDICATED:
5975  return false;
5976  default:
5977  VMA_ASSERT(0);
5978  return false;
5979  }
5980 }
5981 
5982 VmaPool VmaAllocation_T::GetPool() const
5983 {
5984  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5985  return m_BlockAllocation.m_hPool;
5986 }
5987 
// Tries to atomically mark this allocation as lost. Succeeds only when the
// allocation has not been used within the last frameInUseCount frames
// relative to currentFrameIndex. Returns true if it became lost.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Compare-exchange retry loop on the atomic last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - callers should not ask again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use - cannot be made lost.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // Compare-exchange failed: retry with the refreshed value.
        }
    }
}
6019 
6020 #if VMA_STATS_STRING_ENABLED
6021 
// Correspond to values of enum VmaSuballocationType.
// Used to print suballocation types into the JSON statistics dump.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
6031 
// Writes this allocation's parameters as key/value pairs into an already
// open JSON object (the caller is responsible for BeginObject/EndObject).
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer: print its address as a string.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Usage flags are recorded only for buffer/image allocations; 0 means none.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
6067 
6068 #endif
6069 
6070 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6071 {
6072  VMA_ASSERT(IsUserDataString());
6073  if(m_pUserData != VMA_NULL)
6074  {
6075  char* const oldStr = (char*)m_pUserData;
6076  const size_t oldStrLen = strlen(oldStr);
6077  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6078  m_pUserData = VMA_NULL;
6079  }
6080 }
6081 
6082 void VmaAllocation_T::BlockAllocMap()
6083 {
6084  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6085 
6086  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6087  {
6088  ++m_MapCount;
6089  }
6090  else
6091  {
6092  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6093  }
6094 }
6095 
6096 void VmaAllocation_T::BlockAllocUnmap()
6097 {
6098  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6099 
6100  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6101  {
6102  --m_MapCount;
6103  }
6104  else
6105  {
6106  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6107  }
6108 }
6109 
// Maps the dedicated allocation's memory and returns the pointer in *ppData.
// Mapping is reference-counted: if already mapped, only the counter (low
// 7 bits of m_MapCount) is incremented and the cached pointer is returned.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            // Reference counter would overflow its 7-bit storage.
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First mapping: map the whole VkDeviceMemory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
6146 
// Decrements the mapping reference count of the dedicated allocation and
// unmaps the underlying VkDeviceMemory when the count reaches zero.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        // m_MapCount can only reach 0 when the persistent-map flag is not set.
        if(m_MapCount == 0)
        {
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
6167 
6168 #if VMA_STATS_STRING_ENABLED
6169 
// Writes one VmaStatInfo structure as a JSON object.
// The Min/Avg/Max breakdowns are emitted only when more than one element
// exists - with a single element they would all equal the total.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
6217 
6218 #endif // #if VMA_STATS_STRING_ENABLED
6219 
// Orders VmaSuballocationList iterators by the size of the suballocation
// they point to (used for keeping free suballocations sorted by size).
// The second overload allows comparing directly against a plain size value.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
6235 
6236 
6238 // class VmaBlockMetadata
6239 
6240 #if VMA_STATS_STRING_ENABLED
6241 
// Writes the common header of a block's detailed JSON dump: total/unused
// bytes and counts, followed by the opening of the "Suballocations" array.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}

// Writes one used suballocation as a single-line JSON object.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}

// Writes one free range as a single-line JSON object.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}

// Closes the "Suballocations" array and the enclosing object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
6302 
6303 #endif // #if VMA_STATS_STRING_ENABLED
6304 
6306 // class VmaBlockMetadata_Generic
6307 
// Metadata starts empty; Init() must be called afterwards to create the
// initial free suballocation spanning the whole block.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}

VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
6319 
6320 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6321 {
6322  VmaBlockMetadata::Init(size);
6323  m_FreeCount = 1;
6324  m_SumFreeSize = size;
6325 
6326  VmaSuballocation suballoc = {};
6327  suballoc.offset = 0;
6328  suballoc.size = size;
6329  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6330  suballoc.hAllocation = VK_NULL_HANDLE;
6331 
6332  m_Suballocations.push_back(suballoc);
6333  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6334  --suballocItem;
6335  m_FreeSuballocationsBySize.push_back(suballocItem);
6336 }
6337 
// Self-check of all metadata invariants. Returns false on the first
// violation found, true when the metadata is internally consistent.
bool VmaBlockMetadata_Generic::Validate() const
{
    if(m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        if(subAlloc.offset != calculatedOffset)
        {
            return false;
        }

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
        {
            return false;
        }

        // Free suballocations carry a null handle, used ones a non-null handle.
        if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            if(subAlloc.size < VMA_DEBUG_MARGIN)
            {
                return false;
            }
        }
        else
        {
            // The allocation object must agree with its suballocation entry.
            if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
            {
                return false;
            }
            if(subAlloc.hAllocation->GetSize() != subAlloc.size)
            {
                return false;
            }

            // Margin required between allocations - previous allocation must be free.
            if(VMA_DEBUG_MARGIN > 0 && !prevFree)
            {
                return false;
            }
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    {
        return false;
    }

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            return false;
        }
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
        {
            return false;
        }

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    if(!ValidateFreeSuballocationList() ||
        (calculatedOffset != GetSize()) ||
        (calculatedSumFreeSize != m_SumFreeSize) ||
        (calculatedFreeCount != m_FreeCount))
    {
        return false;
    }

    return true;
}
6455 
6456 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6457 {
6458  if(!m_FreeSuballocationsBySize.empty())
6459  {
6460  return m_FreeSuballocationsBySize.back()->size;
6461  }
6462  else
6463  {
6464  return 0;
6465  }
6466 }
6467 
// The block is empty when it holds exactly one suballocation and it is free.
bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}
6472 
// Computes statistics for this single block by walking all suballocations.
// Min fields start at UINT64_MAX and max fields at 0; they keep those
// sentinel values when no allocation (resp. free range) exists.
void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}
6506 
// Accumulates this block's usage numbers into pool-level statistics.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
6517 
6518 #if VMA_STATS_STRING_ENABLED
6519 
6520 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6521 {
6522  PrintDetailedMap_Begin(json,
6523  m_SumFreeSize, // unusedBytes
6524  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6525  m_FreeCount); // unusedRangeCount
6526 
6527  size_t i = 0;
6528  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6529  suballocItem != m_Suballocations.cend();
6530  ++suballocItem, ++i)
6531  {
6532  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6533  {
6534  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6535  }
6536  else
6537  {
6538  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6539  }
6540  }
6541 
6542  PrintDetailedMap_End(json);
6543 }
6544 
6545 #endif // #if VMA_STATS_STRING_ENABLED
6546 
6547 /*
6548 How many suitable free suballocations to analyze before choosing best one.
6549 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
6550  be chosen.
6551 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
 suballocations will be analyzed and the best one will be chosen.
6553 - Any other value is also acceptable.
6554 */
6555 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
6556 
/*
Tries to find a place for a new allocation inside this block.

On success returns true and fills *pAllocationRequest with the chosen offset,
the suballocation item to allocate from, and statistics used to compare the
cost of competing requests.

- currentFrameIndex / frameInUseCount: used (via CheckAllocation) to decide
  which existing allocations are old enough to become lost.
- bufferImageGranularity: minimum required distance between buffer and image
  resources, per Vulkan limits.
- upperAddress must be false here - upper-address allocation is supported
  only by the linear algorithm.
- canMakeOtherLost: when true and no plain free space suffices, a brute-force
  search may propose making existing lost-able allocations lost.
*/
bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fulfill the request: Early return.
    if(canMakeOtherLost == false && m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize + 2 * VMA_DEBUG_MARGIN,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            // Walk candidates from the smallest sufficient size upward (best-fit);
            // the first one that passes alignment/granularity checks wins.
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else
        {
            // Search starting from biggest suballocations (worst-fit).
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.
        // Every suballocation (free or lost-able) is tried as a starting point;
        // the candidate with the lowest CalcCost() is kept.

        // VK_WHOLE_SIZE acts as "no candidate found yet" sentinel / worst cost.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        // Any successful candidate overwrote the VK_WHOLE_SIZE sentinel.
        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
6687 
/*
Makes lost the allocations that CreateAllocationRequest counted in
pAllocationRequest->itemsToMakeLostCount, walking forward from
pAllocationRequest->item.

Returns false if any of them can no longer be made lost (the caller should
retry finding an allocation request). On success, pAllocationRequest->item
points at a free suballocation resulting from the frees/merges.
*/
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip a free item to reach the next allocation that must be made lost.
        // (Free items never appear adjacent, so one step suffices.)
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors; it returns the
            // resulting free item, which becomes the new walk position.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
6719 
6720 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6721 {
6722  uint32_t lostAllocationCount = 0;
6723  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
6724  it != m_Suballocations.end();
6725  ++it)
6726  {
6727  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
6728  it->hAllocation->CanBecomeLost() &&
6729  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
6730  {
6731  it = FreeSuballocation(it);
6732  ++lostAllocationCount;
6733  }
6734  }
6735  return lostAllocationCount;
6736 }
6737 
6738 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
6739 {
6740  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
6741  it != m_Suballocations.end();
6742  ++it)
6743  {
6744  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
6745  {
6746  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
6747  {
6748  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
6749  return VK_ERROR_VALIDATION_FAILED_EXT;
6750  }
6751  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
6752  {
6753  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
6754  return VK_ERROR_VALIDATION_FAILED_EXT;
6755  }
6756  }
6757  }
6758 
6759  return VK_SUCCESS;
6760 }
6761 
/*
Commits an allocation previously found by CreateAllocationRequest.

Converts the free suballocation pointed to by request.item into a used one of
exactly allocSize bytes at request.offset, inserting new free suballocations
for leftover space before (paddingBegin) and after (paddingEnd) it, and
updates m_FreeCount / m_SumFreeSize accordingly.

upperAddress must be false - upper-address allocation is supported only by
the linear algorithm.
*/
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    // (List insertion does not invalidate request.item, so end-padding can be
    // inserted first without disturbing the begin-padding insertion below.)
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range was consumed; each inserted padding range
    // adds one back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
6827 
6828 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
6829 {
6830  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
6831  suballocItem != m_Suballocations.end();
6832  ++suballocItem)
6833  {
6834  VmaSuballocation& suballoc = *suballocItem;
6835  if(suballoc.hAllocation == allocation)
6836  {
6837  FreeSuballocation(suballocItem);
6838  VMA_HEAVY_ASSERT(Validate());
6839  return;
6840  }
6841  }
6842  VMA_ASSERT(0 && "Not found!");
6843 }
6844 
6845 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
6846 {
6847  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
6848  suballocItem != m_Suballocations.end();
6849  ++suballocItem)
6850  {
6851  VmaSuballocation& suballoc = *suballocItem;
6852  if(suballoc.offset == offset)
6853  {
6854  FreeSuballocation(suballocItem);
6855  return;
6856  }
6857  }
6858  VMA_ASSERT(0 && "Not found!");
6859 }
6860 
6861 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
6862 {
6863  VkDeviceSize lastSize = 0;
6864  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
6865  {
6866  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
6867 
6868  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
6869  {
6870  VMA_ASSERT(0);
6871  return false;
6872  }
6873  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6874  {
6875  VMA_ASSERT(0);
6876  return false;
6877  }
6878  if(it->size < lastSize)
6879  {
6880  VMA_ASSERT(0);
6881  return false;
6882  }
6883 
6884  lastSize = it->size;
6885  }
6886  return true;
6887 }
6888 
/*
Checks whether an allocation of allocSize / allocAlignment / allocType can be
placed starting in the suballocation pointed to by suballocItem.

On success returns true and fills:
- *pOffset - final, aligned offset for the proposed allocation,
- *itemsToMakeLostCount - number of existing allocations that would need to
  become lost (always stays 0 when canMakeOtherLost is false),
- *pSumFreeSize / *pSumItemSize - byte totals used by the caller to compare
  the cost of competing candidates.

When canMakeOtherLost is true, suballocItem may also be a used suballocation
whose allocation can become lost; the search may then consume following items
to gather enough space.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // A used starting item is acceptable only if its allocation is
            // lost-able and old enough.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    // Following used items must also be lost-able and old enough.
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: only a genuinely free suballocation can be used.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
7162 
7163 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7164 {
7165  VMA_ASSERT(item != m_Suballocations.end());
7166  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7167 
7168  VmaSuballocationList::iterator nextItem = item;
7169  ++nextItem;
7170  VMA_ASSERT(nextItem != m_Suballocations.end());
7171  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7172 
7173  item->size += nextItem->size;
7174  --m_FreeCount;
7175  m_Suballocations.erase(nextItem);
7176 }
7177 
/*
Releases the given suballocation: marks it free and merges it with adjacent
free neighbors so the list never contains two consecutive free items.
Keeps m_FreeSuballocationsBySize, m_FreeCount and m_SumFreeSize in sync.
Returns an iterator to the resulting (possibly merged) free suballocation.
*/
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Neighbors must be unregistered from the by-size vector BEFORE merging,
    // because merging changes sizes / erases items.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        // prevItem now covers the whole merged range; re-register with new size.
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
7229 
7230 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7231 {
7232  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7233  VMA_ASSERT(item->size > 0);
7234 
7235  // You may want to enable this validation at the beginning or at the end of
7236  // this function, depending on what do you want to check.
7237  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7238 
7239  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7240  {
7241  if(m_FreeSuballocationsBySize.empty())
7242  {
7243  m_FreeSuballocationsBySize.push_back(item);
7244  }
7245  else
7246  {
7247  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7248  }
7249  }
7250 
7251  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7252 }
7253 
7254 
7255 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7256 {
7257  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7258  VMA_ASSERT(item->size > 0);
7259 
7260  // You may want to enable this validation at the beginning or at the end of
7261  // this function, depending on what do you want to check.
7262  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7263 
7264  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7265  {
7266  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7267  m_FreeSuballocationsBySize.data(),
7268  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7269  item,
7270  VmaSuballocationItemSizeLess());
7271  for(size_t index = it - m_FreeSuballocationsBySize.data();
7272  index < m_FreeSuballocationsBySize.size();
7273  ++index)
7274  {
7275  if(m_FreeSuballocationsBySize[index] == item)
7276  {
7277  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7278  return;
7279  }
7280  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7281  }
7282  VMA_ASSERT(0 && "Not found.");
7283  }
7284 
7285  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7286 }
7287 
7289 // class VmaBlockMetadata_Linear
7290 
// Constructs linear-algorithm metadata. Both suballocation vectors use the
// allocator's own allocation callbacks; the block starts with an empty 1st
// vector and the 2nd vector in SECOND_VECTOR_EMPTY mode.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
7302 
// Trivial destructor - member vectors release their storage via their
// VmaStlAllocator instances.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
7306 
// Initializes metadata for a block of the given size; initially the whole
// block is free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
7312 
/*
Exhaustively checks internal invariants of the linear metadata:
vector/mode consistency, null-item bookkeeping, strictly advancing offsets
(with VMA_DEBUG_MARGIN gaps between items), cross-checks against each
allocation object, and that m_SumFreeSize matches block size minus the sum of
used sizes. Returns false on the first violated invariant.
*/
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is empty if and only if its mode is SECOND_VECTOR_EMPTY.
    if(suballocations2nd.empty() != (m_2ndVectorMode == SECOND_VECTOR_EMPTY))
    {
        return false;
    }
    // In ring-buffer mode a non-empty 2nd vector requires a non-empty 1st.
    if(suballocations1st.empty() && !suballocations2nd.empty() &&
        m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        return false;
    }
    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        if(suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            return false;
        }
        // Null item at the end should be just pop_back().
        if(suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            return false;
        }
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        if(suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            return false;
        }
    }

    if(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount > suballocations1st.size())
    {
        return false;
    }
    if(m_2ndNullItemsCount > suballocations2nd.size())
    {
        return false;
    }

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Offsets must advance monotonically, separated by at least VMA_DEBUG_MARGIN.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lower part of the
    // block, so it is walked first to keep `offset` monotonic.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // An item is free if and only if its allocation handle is null.
            if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
            {
                return false;
            }
            if(suballoc.offset < offset)
            {
                return false;
            }

            if(!currFree)
            {
                // Cross-check metadata against the allocation object.
                if(suballoc.hAllocation->GetOffset() != suballoc.offset)
                {
                    return false;
                }
                if(suballoc.hAllocation->GetSize() != suballoc.size)
                {
                    return false;
                }
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        if(nullItem2ndCount != m_2ndNullItemsCount)
        {
            return false;
        }
    }

    // Leading null items of the 1st vector must be free with no allocation.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE ||
            suballoc.hAllocation != VK_NULL_HANDLE)
        {
            return false;
        }
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }
        if(suballoc.offset < offset)
        {
            return false;
        }
        // NOTE(review): this condition can never be true - the loop index
        // starts at m_1stNullItemsBeginCount, so `i < m_1stNullItemsBeginCount`
        // is always false here. Apparent dead code; confirm intent upstream.
        if(i < m_1stNullItemsBeginCount && !currFree)
        {
            return false;
        }

        if(!currFree)
        {
            if(suballoc.hAllocation->GetOffset() != suballoc.offset)
            {
                return false;
            }
            if(suballoc.hAllocation->GetSize() != suballoc.size)
            {
                return false;
            }
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    if(nullItem1stCount != m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount)
    {
        return false;
    }

    // In double-stack mode the 2nd vector grows downward from the end of the
    // block, so iterate it backwards to keep `offset` monotonic.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
            {
                return false;
            }
            if(suballoc.offset < offset)
            {
                return false;
            }

            if(!currFree)
            {
                if(suballoc.hAllocation->GetOffset() != suballoc.offset)
                {
                    return false;
                }
                if(suballoc.hAllocation->GetSize() != suballoc.size)
                {
                    return false;
                }
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        if(nullItem2ndCount != m_2ndNullItemsCount)
        {
            return false;
        }
    }

    if(offset > GetSize())
    {
        return false;
    }
    if(m_SumFreeSize != GetSize() - sumUsedSize)
    {
        return false;
    }

    return true;
}
7515 
7516 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7517 {
7518  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7519  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7520 }
7521 
// Returns the size of the largest contiguous range currently available for a
// NEW allocation, which depends on the mode of the 2nd suballocation vector.
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
7585 
7586 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7587 {
7588  const VkDeviceSize size = GetSize();
7589  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7590  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7591  const size_t suballoc1stCount = suballocations1st.size();
7592  const size_t suballoc2ndCount = suballocations2nd.size();
7593 
7594  outInfo.blockCount = 1;
7595  outInfo.allocationCount = (uint32_t)GetAllocationCount();
7596  outInfo.unusedRangeCount = 0;
7597  outInfo.usedBytes = 0;
7598  outInfo.allocationSizeMin = UINT64_MAX;
7599  outInfo.allocationSizeMax = 0;
7600  outInfo.unusedRangeSizeMin = UINT64_MAX;
7601  outInfo.unusedRangeSizeMax = 0;
7602 
7603  VkDeviceSize lastOffset = 0;
7604 
7605  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7606  {
7607  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7608  size_t nextAlloc2ndIndex = 0;
7609  while(lastOffset < freeSpace2ndTo1stEnd)
7610  {
7611  // Find next non-null allocation or move nextAllocIndex to the end.
7612  while(nextAlloc2ndIndex < suballoc2ndCount &&
7613  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7614  {
7615  ++nextAlloc2ndIndex;
7616  }
7617 
7618  // Found non-null allocation.
7619  if(nextAlloc2ndIndex < suballoc2ndCount)
7620  {
7621  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7622 
7623  // 1. Process free space before this allocation.
7624  if(lastOffset < suballoc.offset)
7625  {
7626  // There is free space from lastOffset to suballoc.offset.
7627  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7628  ++outInfo.unusedRangeCount;
7629  outInfo.unusedBytes += unusedRangeSize;
7630  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7631  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7632  }
7633 
7634  // 2. Process this allocation.
7635  // There is allocation with suballoc.offset, suballoc.size.
7636  outInfo.usedBytes += suballoc.size;
7637  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7638  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7639 
7640  // 3. Prepare for next iteration.
7641  lastOffset = suballoc.offset + suballoc.size;
7642  ++nextAlloc2ndIndex;
7643  }
7644  // We are at the end.
7645  else
7646  {
7647  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7648  if(lastOffset < freeSpace2ndTo1stEnd)
7649  {
7650  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7651  ++outInfo.unusedRangeCount;
7652  outInfo.unusedBytes += unusedRangeSize;
7653  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7654  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7655  }
7656 
7657  // End of loop.
7658  lastOffset = freeSpace2ndTo1stEnd;
7659  }
7660  }
7661  }
7662 
7663  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7664  const VkDeviceSize freeSpace1stTo2ndEnd =
7665  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7666  while(lastOffset < freeSpace1stTo2ndEnd)
7667  {
7668  // Find next non-null allocation or move nextAllocIndex to the end.
7669  while(nextAlloc1stIndex < suballoc1stCount &&
7670  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7671  {
7672  ++nextAlloc1stIndex;
7673  }
7674 
7675  // Found non-null allocation.
7676  if(nextAlloc1stIndex < suballoc1stCount)
7677  {
7678  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7679 
7680  // 1. Process free space before this allocation.
7681  if(lastOffset < suballoc.offset)
7682  {
7683  // There is free space from lastOffset to suballoc.offset.
7684  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7685  ++outInfo.unusedRangeCount;
7686  outInfo.unusedBytes += unusedRangeSize;
7687  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7688  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7689  }
7690 
7691  // 2. Process this allocation.
7692  // There is allocation with suballoc.offset, suballoc.size.
7693  outInfo.usedBytes += suballoc.size;
7694  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7695  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7696 
7697  // 3. Prepare for next iteration.
7698  lastOffset = suballoc.offset + suballoc.size;
7699  ++nextAlloc1stIndex;
7700  }
7701  // We are at the end.
7702  else
7703  {
7704  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7705  if(lastOffset < freeSpace1stTo2ndEnd)
7706  {
7707  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7708  ++outInfo.unusedRangeCount;
7709  outInfo.unusedBytes += unusedRangeSize;
7710  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7711  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7712  }
7713 
7714  // End of loop.
7715  lastOffset = freeSpace1stTo2ndEnd;
7716  }
7717  }
7718 
7719  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7720  {
7721  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7722  while(lastOffset < size)
7723  {
7724  // Find next non-null allocation or move nextAllocIndex to the end.
7725  while(nextAlloc2ndIndex != SIZE_MAX &&
7726  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7727  {
7728  --nextAlloc2ndIndex;
7729  }
7730 
7731  // Found non-null allocation.
7732  if(nextAlloc2ndIndex != SIZE_MAX)
7733  {
7734  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7735 
7736  // 1. Process free space before this allocation.
7737  if(lastOffset < suballoc.offset)
7738  {
7739  // There is free space from lastOffset to suballoc.offset.
7740  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7741  ++outInfo.unusedRangeCount;
7742  outInfo.unusedBytes += unusedRangeSize;
7743  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7744  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7745  }
7746 
7747  // 2. Process this allocation.
7748  // There is allocation with suballoc.offset, suballoc.size.
7749  outInfo.usedBytes += suballoc.size;
7750  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7751  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7752 
7753  // 3. Prepare for next iteration.
7754  lastOffset = suballoc.offset + suballoc.size;
7755  --nextAlloc2ndIndex;
7756  }
7757  // We are at the end.
7758  else
7759  {
7760  // There is free space from lastOffset to size.
7761  if(lastOffset < size)
7762  {
7763  const VkDeviceSize unusedRangeSize = size - lastOffset;
7764  ++outInfo.unusedRangeCount;
7765  outInfo.unusedBytes += unusedRangeSize;
7766  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7767  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7768  }
7769 
7770  // End of loop.
7771  lastOffset = size;
7772  }
7773  }
7774  }
7775 
7776  outInfo.unusedBytes = size - outInfo.usedBytes;
7777 }
7778 
7779 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
7780 {
7781  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7782  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7783  const VkDeviceSize size = GetSize();
7784  const size_t suballoc1stCount = suballocations1st.size();
7785  const size_t suballoc2ndCount = suballocations2nd.size();
7786 
7787  inoutStats.size += size;
7788 
7789  VkDeviceSize lastOffset = 0;
7790 
7791  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7792  {
7793  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7794  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
7795  while(lastOffset < freeSpace2ndTo1stEnd)
7796  {
7797  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7798  while(nextAlloc2ndIndex < suballoc2ndCount &&
7799  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7800  {
7801  ++nextAlloc2ndIndex;
7802  }
7803 
7804  // Found non-null allocation.
7805  if(nextAlloc2ndIndex < suballoc2ndCount)
7806  {
7807  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7808 
7809  // 1. Process free space before this allocation.
7810  if(lastOffset < suballoc.offset)
7811  {
7812  // There is free space from lastOffset to suballoc.offset.
7813  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7814  inoutStats.unusedSize += unusedRangeSize;
7815  ++inoutStats.unusedRangeCount;
7816  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7817  }
7818 
7819  // 2. Process this allocation.
7820  // There is allocation with suballoc.offset, suballoc.size.
7821  ++inoutStats.allocationCount;
7822 
7823  // 3. Prepare for next iteration.
7824  lastOffset = suballoc.offset + suballoc.size;
7825  ++nextAlloc2ndIndex;
7826  }
7827  // We are at the end.
7828  else
7829  {
7830  if(lastOffset < freeSpace2ndTo1stEnd)
7831  {
7832  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7833  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7834  inoutStats.unusedSize += unusedRangeSize;
7835  ++inoutStats.unusedRangeCount;
7836  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7837  }
7838 
7839  // End of loop.
7840  lastOffset = freeSpace2ndTo1stEnd;
7841  }
7842  }
7843  }
7844 
7845  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7846  const VkDeviceSize freeSpace1stTo2ndEnd =
7847  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7848  while(lastOffset < freeSpace1stTo2ndEnd)
7849  {
7850  // Find next non-null allocation or move nextAllocIndex to the end.
7851  while(nextAlloc1stIndex < suballoc1stCount &&
7852  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7853  {
7854  ++nextAlloc1stIndex;
7855  }
7856 
7857  // Found non-null allocation.
7858  if(nextAlloc1stIndex < suballoc1stCount)
7859  {
7860  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7861 
7862  // 1. Process free space before this allocation.
7863  if(lastOffset < suballoc.offset)
7864  {
7865  // There is free space from lastOffset to suballoc.offset.
7866  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7867  inoutStats.unusedSize += unusedRangeSize;
7868  ++inoutStats.unusedRangeCount;
7869  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7870  }
7871 
7872  // 2. Process this allocation.
7873  // There is allocation with suballoc.offset, suballoc.size.
7874  ++inoutStats.allocationCount;
7875 
7876  // 3. Prepare for next iteration.
7877  lastOffset = suballoc.offset + suballoc.size;
7878  ++nextAlloc1stIndex;
7879  }
7880  // We are at the end.
7881  else
7882  {
7883  if(lastOffset < freeSpace1stTo2ndEnd)
7884  {
7885  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7886  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7887  inoutStats.unusedSize += unusedRangeSize;
7888  ++inoutStats.unusedRangeCount;
7889  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7890  }
7891 
7892  // End of loop.
7893  lastOffset = freeSpace1stTo2ndEnd;
7894  }
7895  }
7896 
7897  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7898  {
7899  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7900  while(lastOffset < size)
7901  {
7902  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7903  while(nextAlloc2ndIndex != SIZE_MAX &&
7904  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7905  {
7906  --nextAlloc2ndIndex;
7907  }
7908 
7909  // Found non-null allocation.
7910  if(nextAlloc2ndIndex != SIZE_MAX)
7911  {
7912  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7913 
7914  // 1. Process free space before this allocation.
7915  if(lastOffset < suballoc.offset)
7916  {
7917  // There is free space from lastOffset to suballoc.offset.
7918  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7919  inoutStats.unusedSize += unusedRangeSize;
7920  ++inoutStats.unusedRangeCount;
7921  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7922  }
7923 
7924  // 2. Process this allocation.
7925  // There is allocation with suballoc.offset, suballoc.size.
7926  ++inoutStats.allocationCount;
7927 
7928  // 3. Prepare for next iteration.
7929  lastOffset = suballoc.offset + suballoc.size;
7930  --nextAlloc2ndIndex;
7931  }
7932  // We are at the end.
7933  else
7934  {
7935  if(lastOffset < size)
7936  {
7937  // There is free space from lastOffset to size.
7938  const VkDeviceSize unusedRangeSize = size - lastOffset;
7939  inoutStats.unusedSize += unusedRangeSize;
7940  ++inoutStats.unusedRangeCount;
7941  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7942  }
7943 
7944  // End of loop.
7945  lastOffset = size;
7946  }
7947  }
7948  }
7949 }
7950 
7951 #if VMA_STATS_STRING_ENABLED
7952 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
7953 {
7954  const VkDeviceSize size = GetSize();
7955  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7956  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7957  const size_t suballoc1stCount = suballocations1st.size();
7958  const size_t suballoc2ndCount = suballocations2nd.size();
7959 
7960  // FIRST PASS
7961 
7962  size_t unusedRangeCount = 0;
7963  VkDeviceSize usedBytes = 0;
7964 
7965  VkDeviceSize lastOffset = 0;
7966 
7967  size_t alloc2ndCount = 0;
7968  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7969  {
7970  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7971  size_t nextAlloc2ndIndex = 0;
7972  while(lastOffset < freeSpace2ndTo1stEnd)
7973  {
7974  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7975  while(nextAlloc2ndIndex < suballoc2ndCount &&
7976  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7977  {
7978  ++nextAlloc2ndIndex;
7979  }
7980 
7981  // Found non-null allocation.
7982  if(nextAlloc2ndIndex < suballoc2ndCount)
7983  {
7984  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7985 
7986  // 1. Process free space before this allocation.
7987  if(lastOffset < suballoc.offset)
7988  {
7989  // There is free space from lastOffset to suballoc.offset.
7990  ++unusedRangeCount;
7991  }
7992 
7993  // 2. Process this allocation.
7994  // There is allocation with suballoc.offset, suballoc.size.
7995  ++alloc2ndCount;
7996  usedBytes += suballoc.size;
7997 
7998  // 3. Prepare for next iteration.
7999  lastOffset = suballoc.offset + suballoc.size;
8000  ++nextAlloc2ndIndex;
8001  }
8002  // We are at the end.
8003  else
8004  {
8005  if(lastOffset < freeSpace2ndTo1stEnd)
8006  {
8007  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8008  ++unusedRangeCount;
8009  }
8010 
8011  // End of loop.
8012  lastOffset = freeSpace2ndTo1stEnd;
8013  }
8014  }
8015  }
8016 
8017  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8018  size_t alloc1stCount = 0;
8019  const VkDeviceSize freeSpace1stTo2ndEnd =
8020  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8021  while(lastOffset < freeSpace1stTo2ndEnd)
8022  {
8023  // Find next non-null allocation or move nextAllocIndex to the end.
8024  while(nextAlloc1stIndex < suballoc1stCount &&
8025  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8026  {
8027  ++nextAlloc1stIndex;
8028  }
8029 
8030  // Found non-null allocation.
8031  if(nextAlloc1stIndex < suballoc1stCount)
8032  {
8033  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8034 
8035  // 1. Process free space before this allocation.
8036  if(lastOffset < suballoc.offset)
8037  {
8038  // There is free space from lastOffset to suballoc.offset.
8039  ++unusedRangeCount;
8040  }
8041 
8042  // 2. Process this allocation.
8043  // There is allocation with suballoc.offset, suballoc.size.
8044  ++alloc1stCount;
8045  usedBytes += suballoc.size;
8046 
8047  // 3. Prepare for next iteration.
8048  lastOffset = suballoc.offset + suballoc.size;
8049  ++nextAlloc1stIndex;
8050  }
8051  // We are at the end.
8052  else
8053  {
8054  if(lastOffset < size)
8055  {
8056  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8057  ++unusedRangeCount;
8058  }
8059 
8060  // End of loop.
8061  lastOffset = freeSpace1stTo2ndEnd;
8062  }
8063  }
8064 
8065  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8066  {
8067  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8068  while(lastOffset < size)
8069  {
8070  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8071  while(nextAlloc2ndIndex != SIZE_MAX &&
8072  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8073  {
8074  --nextAlloc2ndIndex;
8075  }
8076 
8077  // Found non-null allocation.
8078  if(nextAlloc2ndIndex != SIZE_MAX)
8079  {
8080  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8081 
8082  // 1. Process free space before this allocation.
8083  if(lastOffset < suballoc.offset)
8084  {
8085  // There is free space from lastOffset to suballoc.offset.
8086  ++unusedRangeCount;
8087  }
8088 
8089  // 2. Process this allocation.
8090  // There is allocation with suballoc.offset, suballoc.size.
8091  ++alloc2ndCount;
8092  usedBytes += suballoc.size;
8093 
8094  // 3. Prepare for next iteration.
8095  lastOffset = suballoc.offset + suballoc.size;
8096  --nextAlloc2ndIndex;
8097  }
8098  // We are at the end.
8099  else
8100  {
8101  if(lastOffset < size)
8102  {
8103  // There is free space from lastOffset to size.
8104  ++unusedRangeCount;
8105  }
8106 
8107  // End of loop.
8108  lastOffset = size;
8109  }
8110  }
8111  }
8112 
8113  const VkDeviceSize unusedBytes = size - usedBytes;
8114  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8115 
8116  // SECOND PASS
8117  lastOffset = 0;
8118 
8119  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8120  {
8121  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8122  size_t nextAlloc2ndIndex = 0;
8123  while(lastOffset < freeSpace2ndTo1stEnd)
8124  {
8125  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8126  while(nextAlloc2ndIndex < suballoc2ndCount &&
8127  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8128  {
8129  ++nextAlloc2ndIndex;
8130  }
8131 
8132  // Found non-null allocation.
8133  if(nextAlloc2ndIndex < suballoc2ndCount)
8134  {
8135  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8136 
8137  // 1. Process free space before this allocation.
8138  if(lastOffset < suballoc.offset)
8139  {
8140  // There is free space from lastOffset to suballoc.offset.
8141  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8142  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8143  }
8144 
8145  // 2. Process this allocation.
8146  // There is allocation with suballoc.offset, suballoc.size.
8147  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8148 
8149  // 3. Prepare for next iteration.
8150  lastOffset = suballoc.offset + suballoc.size;
8151  ++nextAlloc2ndIndex;
8152  }
8153  // We are at the end.
8154  else
8155  {
8156  if(lastOffset < freeSpace2ndTo1stEnd)
8157  {
8158  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8159  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8160  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8161  }
8162 
8163  // End of loop.
8164  lastOffset = freeSpace2ndTo1stEnd;
8165  }
8166  }
8167  }
8168 
8169  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8170  while(lastOffset < freeSpace1stTo2ndEnd)
8171  {
8172  // Find next non-null allocation or move nextAllocIndex to the end.
8173  while(nextAlloc1stIndex < suballoc1stCount &&
8174  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8175  {
8176  ++nextAlloc1stIndex;
8177  }
8178 
8179  // Found non-null allocation.
8180  if(nextAlloc1stIndex < suballoc1stCount)
8181  {
8182  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8183 
8184  // 1. Process free space before this allocation.
8185  if(lastOffset < suballoc.offset)
8186  {
8187  // There is free space from lastOffset to suballoc.offset.
8188  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8189  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8190  }
8191 
8192  // 2. Process this allocation.
8193  // There is allocation with suballoc.offset, suballoc.size.
8194  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8195 
8196  // 3. Prepare for next iteration.
8197  lastOffset = suballoc.offset + suballoc.size;
8198  ++nextAlloc1stIndex;
8199  }
8200  // We are at the end.
8201  else
8202  {
8203  if(lastOffset < freeSpace1stTo2ndEnd)
8204  {
8205  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8206  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8207  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8208  }
8209 
8210  // End of loop.
8211  lastOffset = freeSpace1stTo2ndEnd;
8212  }
8213  }
8214 
8215  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8216  {
8217  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8218  while(lastOffset < size)
8219  {
8220  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8221  while(nextAlloc2ndIndex != SIZE_MAX &&
8222  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8223  {
8224  --nextAlloc2ndIndex;
8225  }
8226 
8227  // Found non-null allocation.
8228  if(nextAlloc2ndIndex != SIZE_MAX)
8229  {
8230  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8231 
8232  // 1. Process free space before this allocation.
8233  if(lastOffset < suballoc.offset)
8234  {
8235  // There is free space from lastOffset to suballoc.offset.
8236  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8237  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8238  }
8239 
8240  // 2. Process this allocation.
8241  // There is allocation with suballoc.offset, suballoc.size.
8242  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8243 
8244  // 3. Prepare for next iteration.
8245  lastOffset = suballoc.offset + suballoc.size;
8246  --nextAlloc2ndIndex;
8247  }
8248  // We are at the end.
8249  else
8250  {
8251  if(lastOffset < size)
8252  {
8253  // There is free space from lastOffset to size.
8254  const VkDeviceSize unusedRangeSize = size - lastOffset;
8255  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8256  }
8257 
8258  // End of loop.
8259  lastOffset = size;
8260  }
8261  }
8262  }
8263 
8264  PrintDetailedMap_End(json);
8265 }
8266 #endif // #if VMA_STATS_STRING_ENABLED
8267 
8268 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8269  uint32_t currentFrameIndex,
8270  uint32_t frameInUseCount,
8271  VkDeviceSize bufferImageGranularity,
8272  VkDeviceSize allocSize,
8273  VkDeviceSize allocAlignment,
8274  bool upperAddress,
8275  VmaSuballocationType allocType,
8276  bool canMakeOtherLost,
8277  VmaAllocationRequest* pAllocationRequest)
8278 {
8279  VMA_ASSERT(allocSize > 0);
8280  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8281  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8282  VMA_HEAVY_ASSERT(Validate());
8283 
8284  const VkDeviceSize size = GetSize();
8285  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8286  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8287 
8288  if(upperAddress)
8289  {
8290  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8291  {
8292  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8293  return false;
8294  }
8295 
8296  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8297  if(allocSize > size)
8298  {
8299  return false;
8300  }
8301  VkDeviceSize resultBaseOffset = size - allocSize;
8302  if(!suballocations2nd.empty())
8303  {
8304  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8305  resultBaseOffset = lastSuballoc.offset - allocSize;
8306  if(allocSize > lastSuballoc.offset)
8307  {
8308  return false;
8309  }
8310  }
8311 
8312  // Start from offset equal to end of free space.
8313  VkDeviceSize resultOffset = resultBaseOffset;
8314 
8315  // Apply VMA_DEBUG_MARGIN at the end.
8316  if(VMA_DEBUG_MARGIN > 0)
8317  {
8318  if(resultOffset < VMA_DEBUG_MARGIN)
8319  {
8320  return false;
8321  }
8322  resultOffset -= VMA_DEBUG_MARGIN;
8323  }
8324 
8325  // Apply alignment.
8326  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8327 
8328  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8329  // Make bigger alignment if necessary.
8330  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8331  {
8332  bool bufferImageGranularityConflict = false;
8333  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8334  {
8335  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8336  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8337  {
8338  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8339  {
8340  bufferImageGranularityConflict = true;
8341  break;
8342  }
8343  }
8344  else
8345  // Already on previous page.
8346  break;
8347  }
8348  if(bufferImageGranularityConflict)
8349  {
8350  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8351  }
8352  }
8353 
8354  // There is enough free space.
8355  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8356  suballocations1st.back().offset + suballocations1st.back().size :
8357  0;
8358  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8359  {
8360  // Check previous suballocations for BufferImageGranularity conflicts.
8361  // If conflict exists, allocation cannot be made here.
8362  if(bufferImageGranularity > 1)
8363  {
8364  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8365  {
8366  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8367  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8368  {
8369  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8370  {
8371  return false;
8372  }
8373  }
8374  else
8375  {
8376  // Already on next page.
8377  break;
8378  }
8379  }
8380  }
8381 
8382  // All tests passed: Success.
8383  pAllocationRequest->offset = resultOffset;
8384  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8385  pAllocationRequest->sumItemSize = 0;
8386  // pAllocationRequest->item unused.
8387  pAllocationRequest->itemsToMakeLostCount = 0;
8388  return true;
8389  }
8390  }
8391  else // !upperAddress
8392  {
8393  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8394  {
8395  // Try to allocate at the end of 1st vector.
8396 
8397  VkDeviceSize resultBaseOffset = 0;
8398  if(!suballocations1st.empty())
8399  {
8400  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8401  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8402  }
8403 
8404  // Start from offset equal to beginning of free space.
8405  VkDeviceSize resultOffset = resultBaseOffset;
8406 
8407  // Apply VMA_DEBUG_MARGIN at the beginning.
8408  if(VMA_DEBUG_MARGIN > 0)
8409  {
8410  resultOffset += VMA_DEBUG_MARGIN;
8411  }
8412 
8413  // Apply alignment.
8414  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8415 
8416  // Check previous suballocations for BufferImageGranularity conflicts.
8417  // Make bigger alignment if necessary.
8418  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8419  {
8420  bool bufferImageGranularityConflict = false;
8421  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8422  {
8423  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8424  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8425  {
8426  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8427  {
8428  bufferImageGranularityConflict = true;
8429  break;
8430  }
8431  }
8432  else
8433  // Already on previous page.
8434  break;
8435  }
8436  if(bufferImageGranularityConflict)
8437  {
8438  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8439  }
8440  }
8441 
8442  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8443  suballocations2nd.back().offset : size;
8444 
8445  // There is enough free space at the end after alignment.
8446  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8447  {
8448  // Check next suballocations for BufferImageGranularity conflicts.
8449  // If conflict exists, allocation cannot be made here.
8450  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8451  {
8452  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8453  {
8454  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8455  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8456  {
8457  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8458  {
8459  return false;
8460  }
8461  }
8462  else
8463  {
8464  // Already on previous page.
8465  break;
8466  }
8467  }
8468  }
8469 
8470  // All tests passed: Success.
8471  pAllocationRequest->offset = resultOffset;
8472  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8473  pAllocationRequest->sumItemSize = 0;
8474  // pAllocationRequest->item unused.
8475  pAllocationRequest->itemsToMakeLostCount = 0;
8476  return true;
8477  }
8478  }
8479 
8480  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
8481  // beginning of 1st vector as the end of free space.
8482  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8483  {
8484  VMA_ASSERT(!suballocations1st.empty());
8485 
8486  VkDeviceSize resultBaseOffset = 0;
8487  if(!suballocations2nd.empty())
8488  {
8489  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8490  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8491  }
8492 
8493  // Start from offset equal to beginning of free space.
8494  VkDeviceSize resultOffset = resultBaseOffset;
8495 
8496  // Apply VMA_DEBUG_MARGIN at the beginning.
8497  if(VMA_DEBUG_MARGIN > 0)
8498  {
8499  resultOffset += VMA_DEBUG_MARGIN;
8500  }
8501 
8502  // Apply alignment.
8503  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8504 
8505  // Check previous suballocations for BufferImageGranularity conflicts.
8506  // Make bigger alignment if necessary.
8507  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8508  {
8509  bool bufferImageGranularityConflict = false;
8510  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8511  {
8512  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8513  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8514  {
8515  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8516  {
8517  bufferImageGranularityConflict = true;
8518  break;
8519  }
8520  }
8521  else
8522  // Already on previous page.
8523  break;
8524  }
8525  if(bufferImageGranularityConflict)
8526  {
8527  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8528  }
8529  }
8530 
8531  pAllocationRequest->itemsToMakeLostCount = 0;
8532  pAllocationRequest->sumItemSize = 0;
8533  size_t index1st = m_1stNullItemsBeginCount;
8534 
8535  if(canMakeOtherLost)
8536  {
8537  while(index1st < suballocations1st.size() &&
8538  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8539  {
8540  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8541  const VmaSuballocation& suballoc = suballocations1st[index1st];
8542  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8543  {
8544  // No problem.
8545  }
8546  else
8547  {
8548  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8549  if(suballoc.hAllocation->CanBecomeLost() &&
8550  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8551  {
8552  ++pAllocationRequest->itemsToMakeLostCount;
8553  pAllocationRequest->sumItemSize += suballoc.size;
8554  }
8555  else
8556  {
8557  return false;
8558  }
8559  }
8560  ++index1st;
8561  }
8562 
8563  // Check next suballocations for BufferImageGranularity conflicts.
8564  // If conflict exists, we must mark more allocations lost or fail.
8565  if(bufferImageGranularity > 1)
8566  {
8567  while(index1st < suballocations1st.size())
8568  {
8569  const VmaSuballocation& suballoc = suballocations1st[index1st];
8570  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
8571  {
8572  if(suballoc.hAllocation != VK_NULL_HANDLE)
8573  {
8574  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
8575  if(suballoc.hAllocation->CanBecomeLost() &&
8576  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8577  {
8578  ++pAllocationRequest->itemsToMakeLostCount;
8579  pAllocationRequest->sumItemSize += suballoc.size;
8580  }
8581  else
8582  {
8583  return false;
8584  }
8585  }
8586  }
8587  else
8588  {
8589  // Already on next page.
8590  break;
8591  }
8592  ++index1st;
8593  }
8594  }
8595  }
8596 
8597  // There is enough free space at the end after alignment.
8598  if(index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size ||
8599  index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset)
8600  {
8601  // Check next suballocations for BufferImageGranularity conflicts.
8602  // If conflict exists, allocation cannot be made here.
8603  if(bufferImageGranularity > 1)
8604  {
8605  for(size_t nextSuballocIndex = index1st;
8606  nextSuballocIndex < suballocations1st.size();
8607  nextSuballocIndex++)
8608  {
8609  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
8610  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8611  {
8612  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8613  {
8614  return false;
8615  }
8616  }
8617  else
8618  {
8619  // Already on next page.
8620  break;
8621  }
8622  }
8623  }
8624 
8625  // All tests passed: Success.
8626  pAllocationRequest->offset = resultOffset;
8627  pAllocationRequest->sumFreeSize =
8628  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
8629  - resultBaseOffset
8630  - pAllocationRequest->sumItemSize;
8631  // pAllocationRequest->item unused.
8632  return true;
8633  }
8634  }
8635  }
8636 
8637  return false;
8638 }
8639 
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Makes lost exactly pAllocationRequest->itemsToMakeLostCount allocations,
    // scanning the 1st vector forward from its first non-null item.
    // Returns false if any of them refuses to become lost (MakeLost fails).
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Lost allocations are only produced while the 2nd vector is empty or acts
    // as a ring buffer - never in double-stack mode.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        // Items already free are skipped; they don't count towards the quota.
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a null (free) one and keep the cached
                // counters in sync.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
8684 
8685 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8686 {
8687  uint32_t lostAllocationCount = 0;
8688 
8689  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8690  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8691  {
8692  VmaSuballocation& suballoc = suballocations1st[i];
8693  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8694  suballoc.hAllocation->CanBecomeLost() &&
8695  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8696  {
8697  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8698  suballoc.hAllocation = VK_NULL_HANDLE;
8699  ++m_1stNullItemsMiddleCount;
8700  m_SumFreeSize += suballoc.size;
8701  ++lostAllocationCount;
8702  }
8703  }
8704 
8705  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8706  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8707  {
8708  VmaSuballocation& suballoc = suballocations2nd[i];
8709  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8710  suballoc.hAllocation->CanBecomeLost() &&
8711  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8712  {
8713  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8714  suballoc.hAllocation = VK_NULL_HANDLE;
8715  ++m_2ndNullItemsCount;
8716  ++lostAllocationCount;
8717  }
8718  }
8719 
8720  if(lostAllocationCount)
8721  {
8722  CleanupAfterFree();
8723  }
8724 
8725  return lostAllocationCount;
8726 }
8727 
8728 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
8729 {
8730  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8731  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8732  {
8733  const VmaSuballocation& suballoc = suballocations1st[i];
8734  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8735  {
8736  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8737  {
8738  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8739  return VK_ERROR_VALIDATION_FAILED_EXT;
8740  }
8741  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8742  {
8743  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8744  return VK_ERROR_VALIDATION_FAILED_EXT;
8745  }
8746  }
8747  }
8748 
8749  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8750  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8751  {
8752  const VmaSuballocation& suballoc = suballocations2nd[i];
8753  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8754  {
8755  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8756  {
8757  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8758  return VK_ERROR_VALIDATION_FAILED_EXT;
8759  }
8760  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8761  {
8762  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8763  return VK_ERROR_VALIDATION_FAILED_EXT;
8764  }
8765  }
8766  }
8767 
8768  return VK_SUCCESS;
8769 }
8770 
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Commits a previously created allocation request: inserts the new
    // suballocation into the 1st or 2nd vector depending on where the request
    // offset falls, switching the 2nd-vector mode accordingly.
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper-address allocations use the 2nd vector as a stack growing from
        // the end of the block - mutually exclusive with ring-buffer usage.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Request offset doesn't match either insertion point - the
                // request must have been created against different state.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    // Keep the cached free-size counter in sync.
    m_SumFreeSize -= newSuballoc.size;
}
8840 
void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
{
    // Linear metadata identifies suballocations by offset, so delegate.
    FreeAtOffset(allocation->GetOffset());
}
8845 
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    // Frees the suballocation that starts at `offset`. Fast paths handle the
    // front of the 1st vector and the back of either vector; otherwise a
    // binary search over the sorted vectors locates the item, which is turned
    // into a null item. CleanupAfterFree() then restores invariants.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Search only past the leading null items - they have stale offsets.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // The 2nd vector is sorted ascending in ring-buffer mode but descending
        // in double-stack mode, so pick the matching comparator.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
8934 
8935 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
8936 {
8937  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
8938  const size_t suballocCount = AccessSuballocations1st().size();
8939  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
8940 }
8941 
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    // Restores invariants after any free: strips null items from the edges of
    // both vectors, optionally compacts the 1st vector, and when the 1st
    // vector empties, promotes the 2nd (ring-buffer) vector to become the new
    // 1st. The order of the steps below matters.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations at all - reset to pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            // A middle null item reached the front counts as a begin null item.
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide all live items to the front, preserving their order, then
            // shrink the vector to the live count.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Leading nulls of the promoted vector become begin nulls.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flipping the index makes AccessSuballocations1st() return
                // what was the 2nd vector.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
9038 
9039 
9041 // class VmaDeviceMemoryBlock
9042 
// Constructs an uninitialized block; real setup happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),          // Reference count for persistent mapping.
    m_pMappedData(VMA_NULL)
{
}
9052 
9053 void VmaDeviceMemoryBlock::Init(
9054  VmaAllocator hAllocator,
9055  uint32_t newMemoryTypeIndex,
9056  VkDeviceMemory newMemory,
9057  VkDeviceSize newSize,
9058  uint32_t id,
9059  bool linearAlgorithm)
9060 {
9061  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
9062 
9063  m_MemoryTypeIndex = newMemoryTypeIndex;
9064  m_Id = id;
9065  m_hMemory = newMemory;
9066 
9067  if(linearAlgorithm)
9068  {
9069  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
9070  }
9071  else
9072  {
9073  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
9074  }
9075  m_pMetadata->Init(newSize);
9076 }
9077 
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // Releases the Vulkan memory and the metadata object.
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
9091 
9092 bool VmaDeviceMemoryBlock::Validate() const
9093 {
9094  if((m_hMemory == VK_NULL_HANDLE) ||
9095  (m_pMetadata->GetSize() == 0))
9096  {
9097  return false;
9098  }
9099 
9100  return m_pMetadata->Validate();
9101 }
9102 
9103 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
9104 {
9105  void* pData = nullptr;
9106  VkResult res = Map(hAllocator, 1, &pData);
9107  if(res != VK_SUCCESS)
9108  {
9109  return res;
9110  }
9111 
9112  res = m_pMetadata->CheckCorruption(pData);
9113 
9114  Unmap(hAllocator, 1);
9115 
9116  return res;
9117 }
9118 
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    // Reference-counted mapping of the whole block: `count` is added to the
    // map counter and vkMapMemory is called only on the 0 -> nonzero
    // transition. ppData may be null if the caller doesn't need the pointer.
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Protects m_MapCount / m_pMappedData and serializes vkMapMemory /
    // vkUnmapMemory calls on this VkDeviceMemory across threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped - just bump the counter and reuse the pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            // Counter starts at `count`, not 1 - callers may map in bulk.
            m_MapCount = count;
        }
        return result;
    }
}
9157 
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    // Counterpart of Map(): decreases the map reference counter by `count`
    // and calls vkUnmapMemory only when it reaches zero.
    if(count == 0)
    {
        return;
    }

    // Same lock as in Map() - serializes counter updates and vkUnmapMemory.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        // More unmaps than maps - a usage error by the caller.
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
9180 
9181 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
9182 {
9183  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
9184  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
9185 
9186  void* pData;
9187  VkResult res = Map(hAllocator, 1, &pData);
9188  if(res != VK_SUCCESS)
9189  {
9190  return res;
9191  }
9192 
9193  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
9194  VmaWriteMagicValue(pData, allocOffset + allocSize);
9195 
9196  Unmap(hAllocator, 1);
9197 
9198  return VK_SUCCESS;
9199 }
9200 
9201 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
9202 {
9203  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
9204  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
9205 
9206  void* pData;
9207  VkResult res = Map(hAllocator, 1, &pData);
9208  if(res != VK_SUCCESS)
9209  {
9210  return res;
9211  }
9212 
9213  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
9214  {
9215  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
9216  }
9217  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
9218  {
9219  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
9220  }
9221 
9222  Unmap(hAllocator, 1);
9223 
9224  return VK_SUCCESS;
9225 }
9226 
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkBuffer hBuffer)
{
    // Binds hBuffer to this block's VkDeviceMemory at the allocation's offset.
    // The allocation must live inside this block.
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
        hAllocator->m_hDevice,
        hBuffer,
        m_hMemory,
        hAllocation->GetOffset());
}
9242 
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkImage hImage)
{
    // Binds hImage to this block's VkDeviceMemory at the allocation's offset.
    // The allocation must live inside this block.
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindImageMemory(
        hAllocator->m_hDevice,
        hImage,
        m_hMemory,
        hAllocation->GetOffset());
}
9258 
9259 static void InitStatInfo(VmaStatInfo& outInfo)
9260 {
9261  memset(&outInfo, 0, sizeof(outInfo));
9262  outInfo.allocationSizeMin = UINT64_MAX;
9263  outInfo.unusedRangeSizeMin = UINT64_MAX;
9264 }
9265 
9266 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
9267 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
9268 {
9269  inoutInfo.blockCount += srcInfo.blockCount;
9270  inoutInfo.allocationCount += srcInfo.allocationCount;
9271  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
9272  inoutInfo.usedBytes += srcInfo.usedBytes;
9273  inoutInfo.unusedBytes += srcInfo.unusedBytes;
9274  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
9275  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
9276  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
9277  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
9278 }
9279 
9280 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
9281 {
9282  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
9283  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
9284  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
9285  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
9286 }
9287 
// Constructs a custom pool: translates VmaPoolCreateInfo into the parameters
// of the underlying block vector.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        (createInfo.flags & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) != 0), // linearAlgorithm
    m_Id(0)
{
}
9304 
// Nothing to do explicitly - members clean up via their own destructors.
VmaPool_T::~VmaPool_T()
{
}
9308 
9309 #if VMA_STATS_STRING_ENABLED
9310 
9311 #endif // #if VMA_STATS_STRING_ENABLED
9312 
// Constructs an empty block vector; no Vulkan memory is allocated here
// (see CreateMinBlocks / Allocate).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool linearAlgorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_LinearAlgorithm(linearAlgorithm),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_HasEmptyBlock(false),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)  // Monotonic id handed to each newly created block.
{
}
9338 
9339 VmaBlockVector::~VmaBlockVector()
9340 {
9341  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
9342 
9343  for(size_t i = m_Blocks.size(); i--; )
9344  {
9345  m_Blocks[i]->Destroy(m_hAllocator);
9346  vma_delete(m_hAllocator, m_Blocks[i]);
9347  }
9348 }
9349 
9350 VkResult VmaBlockVector::CreateMinBlocks()
9351 {
9352  for(size_t i = 0; i < m_MinBlockCount; ++i)
9353  {
9354  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
9355  if(res != VK_SUCCESS)
9356  {
9357  return res;
9358  }
9359  }
9360  return VK_SUCCESS;
9361 }
9362 
9363 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
9364 {
9365  pStats->size = 0;
9366  pStats->unusedSize = 0;
9367  pStats->allocationCount = 0;
9368  pStats->unusedRangeCount = 0;
9369  pStats->unusedRangeSizeMax = 0;
9370 
9371  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9372 
9373  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
9374  {
9375  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
9376  VMA_ASSERT(pBlock);
9377  VMA_HEAVY_ASSERT(pBlock->Validate());
9378  pBlock->m_pMetadata->AddPoolStats(*pStats);
9379  }
9380 }
9381 
9382 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
9383 {
9384  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
9385  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
9386  (VMA_DEBUG_MARGIN > 0) &&
9387  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
9388 }
9389 
// Upper bound on allocation retries - presumably limits loops that repeatedly
// make other allocations lost and retry; confirm at the use sites below.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
9391 
9392 VkResult VmaBlockVector::Allocate(
9393  VmaPool hCurrentPool,
9394  uint32_t currentFrameIndex,
9395  VkDeviceSize size,
9396  VkDeviceSize alignment,
9397  const VmaAllocationCreateInfo& createInfo,
9398  VmaSuballocationType suballocType,
9399  VmaAllocation* pAllocation)
9400 {
9401  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
9402  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
9403  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
9404  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
9405  const bool canCreateNewBlock =
9406  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
9407  (m_Blocks.size() < m_MaxBlockCount);
9408 
9409  // Upper address can only be used with linear allocator.
9410  if(isUpperAddress && !m_LinearAlgorithm)
9411  {
9412  return VK_ERROR_FEATURE_NOT_PRESENT;
9413  }
9414 
9415  // Early reject: requested allocation size is larger that maximum block size for this block vector.
9416  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
9417  {
9418  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
9419  }
9420 
9421  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9422 
9423  /*
9424  Under certain condition, this whole section can be skipped for optimization, so
9425  we move on directly to trying to allocate with canMakeOtherLost. That's the case
9426  e.g. for custom pools with linear algorithm.
9427  */
9428  if(!canMakeOtherLost || canCreateNewBlock)
9429  {
9430  // 1. Search existing allocations. Try to allocate without making other allocations lost.
9431  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
9432  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
9433  {
9434  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
9435  VMA_ASSERT(pCurrBlock);
9436  VmaAllocationRequest currRequest = {};
9437  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
9438  currentFrameIndex,
9439  m_FrameInUseCount,
9440  m_BufferImageGranularity,
9441  size,
9442  alignment,
9443  isUpperAddress,
9444  suballocType,
9445  false, // canMakeOtherLost
9446  &currRequest))
9447  {
9448  // Allocate from pCurrBlock.
9449  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
9450 
9451  if(mapped)
9452  {
9453  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
9454  if(res != VK_SUCCESS)
9455  {
9456  return res;
9457  }
9458  }
9459 
9460  // We no longer have an empty Allocation.
9461  if(pCurrBlock->m_pMetadata->IsEmpty())
9462  {
9463  m_HasEmptyBlock = false;
9464  }
9465 
9466  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
9467  pCurrBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
9468  (*pAllocation)->InitBlockAllocation(
9469  hCurrentPool,
9470  pCurrBlock,
9471  currRequest.offset,
9472  alignment,
9473  size,
9474  suballocType,
9475  mapped,
9476  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
9477  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
9478  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
9479  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
9480  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
9481  {
9482  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
9483  }
9484  if(IsCorruptionDetectionEnabled())
9485  {
9486  VkResult res = pCurrBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
9487  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
9488  }
9489  return VK_SUCCESS;
9490  }
9491  }
9492 
9493  // 2. Try to create new block.
9494  if(canCreateNewBlock)
9495  {
9496  // Calculate optimal size for new block.
9497  VkDeviceSize newBlockSize = m_PreferredBlockSize;
9498  uint32_t newBlockSizeShift = 0;
9499  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
9500 
9501  // Allocating blocks of other sizes is allowed only in default pools.
9502  // In custom pools block size is fixed.
9503  if(m_IsCustomPool == false)
9504  {
9505  // Allocate 1/8, 1/4, 1/2 as first blocks.
9506  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
9507  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
9508  {
9509  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
9510  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
9511  {
9512  newBlockSize = smallerNewBlockSize;
9513  ++newBlockSizeShift;
9514  }
9515  else
9516  {
9517  break;
9518  }
9519  }
9520  }
9521 
9522  size_t newBlockIndex = 0;
9523  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
9524  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
9525  if(m_IsCustomPool == false)
9526  {
9527  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
9528  {
9529  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
9530  if(smallerNewBlockSize >= size)
9531  {
9532  newBlockSize = smallerNewBlockSize;
9533  ++newBlockSizeShift;
9534  res = CreateBlock(newBlockSize, &newBlockIndex);
9535  }
9536  else
9537  {
9538  break;
9539  }
9540  }
9541  }
9542 
9543  if(res == VK_SUCCESS)
9544  {
9545  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
9546  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
9547 
9548  if(mapped)
9549  {
9550  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
9551  if(res != VK_SUCCESS)
9552  {
9553  return res;
9554  }
9555  }
9556 
9557  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
9558  VmaAllocationRequest allocRequest;
9559  if(pBlock->m_pMetadata->CreateAllocationRequest(
9560  currentFrameIndex,
9561  m_FrameInUseCount,
9562  m_BufferImageGranularity,
9563  size,
9564  alignment,
9565  isUpperAddress,
9566  suballocType,
9567  false, // canMakeOtherLost
9568  &allocRequest))
9569  {
9570  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
9571  pBlock->m_pMetadata->Alloc(allocRequest, suballocType, size, isUpperAddress, *pAllocation);
9572  (*pAllocation)->InitBlockAllocation(
9573  hCurrentPool,
9574  pBlock,
9575  allocRequest.offset,
9576  alignment,
9577  size,
9578  suballocType,
9579  mapped,
9580  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
9581  VMA_HEAVY_ASSERT(pBlock->Validate());
9582  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
9583  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
9584  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
9585  {
9586  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
9587  }
9588  if(IsCorruptionDetectionEnabled())
9589  {
9590  res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, allocRequest.offset, size);
9591  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
9592  }
9593  return VK_SUCCESS;
9594  }
9595  else
9596  {
9597  // Allocation from empty block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
9598  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
9599  }
9600  }
9601  }
9602  }
9603 
9604  // 3. Try to allocate from existing blocks with making other allocations lost.
9605  if(canMakeOtherLost)
9606  {
9607  uint32_t tryIndex = 0;
9608  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
9609  {
9610  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
9611  VmaAllocationRequest bestRequest = {};
9612  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
9613 
9614  // 1. Search existing allocations.
9615  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
9616  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
9617  {
9618  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
9619  VMA_ASSERT(pCurrBlock);
9620  VmaAllocationRequest currRequest = {};
9621  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
9622  currentFrameIndex,
9623  m_FrameInUseCount,
9624  m_BufferImageGranularity,
9625  size,
9626  alignment,
9627  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
9628  suballocType,
9629  canMakeOtherLost,
9630  &currRequest))
9631  {
9632  const VkDeviceSize currRequestCost = currRequest.CalcCost();
9633  if(pBestRequestBlock == VMA_NULL ||
9634  currRequestCost < bestRequestCost)
9635  {
9636  pBestRequestBlock = pCurrBlock;
9637  bestRequest = currRequest;
9638  bestRequestCost = currRequestCost;
9639 
9640  if(bestRequestCost == 0)
9641  {
9642  break;
9643  }
9644  }
9645  }
9646  }
9647 
9648  if(pBestRequestBlock != VMA_NULL)
9649  {
9650  if(mapped)
9651  {
9652  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
9653  if(res != VK_SUCCESS)
9654  {
9655  return res;
9656  }
9657  }
9658 
9659  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
9660  currentFrameIndex,
9661  m_FrameInUseCount,
9662  &bestRequest))
9663  {
9664  // We no longer have an empty Allocation.
9665  if(pBestRequestBlock->m_pMetadata->IsEmpty())
9666  {
9667  m_HasEmptyBlock = false;
9668  }
9669  // Allocate from this pBlock.
9670  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
9671  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
9672  (*pAllocation)->InitBlockAllocation(
9673  hCurrentPool,
9674  pBestRequestBlock,
9675  bestRequest.offset,
9676  alignment,
9677  size,
9678  suballocType,
9679  mapped,
9680  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
9681  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
9682  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
9683  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
9684  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
9685  {
9686  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
9687  }
9688  if(IsCorruptionDetectionEnabled())
9689  {
9690  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
9691  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
9692  }
9693  return VK_SUCCESS;
9694  }
9695  // else: Some allocations must have been touched while we are here. Next try.
9696  }
9697  else
9698  {
9699  // Could not find place in any of the blocks - break outer loop.
9700  break;
9701  }
9702  }
9703  /* Maximum number of tries exceeded - a very unlike event when many other
9704  threads are simultaneously touching allocations making it impossible to make
9705  lost at the same time as we try to allocate. */
9706  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
9707  {
9708  return VK_ERROR_TOO_MANY_OBJECTS;
9709  }
9710  }
9711 
9712  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
9713 }
9714 
9715 void VmaBlockVector::Free(
9716  VmaAllocation hAllocation)
9717 {
9718  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
9719 
9720  // Scope for lock.
9721  {
9722  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9723 
9724  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
9725 
9726  if(IsCorruptionDetectionEnabled())
9727  {
9728  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
9729  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
9730  }
9731 
9732  if(hAllocation->IsPersistentMap())
9733  {
9734  pBlock->Unmap(m_hAllocator, 1);
9735  }
9736 
9737  pBlock->m_pMetadata->Free(hAllocation);
9738  VMA_HEAVY_ASSERT(pBlock->Validate());
9739 
9740  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
9741 
9742  // pBlock became empty after this deallocation.
9743  if(pBlock->m_pMetadata->IsEmpty())
9744  {
9745  // Already has empty Allocation. We don't want to have two, so delete this one.
9746  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
9747  {
9748  pBlockToDelete = pBlock;
9749  Remove(pBlock);
9750  }
9751  // We now have first empty block.
9752  else
9753  {
9754  m_HasEmptyBlock = true;
9755  }
9756  }
9757  // pBlock didn't become empty, but we have another empty block - find and free that one.
9758  // (This is optional, heuristics.)
9759  else if(m_HasEmptyBlock)
9760  {
9761  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
9762  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
9763  {
9764  pBlockToDelete = pLastBlock;
9765  m_Blocks.pop_back();
9766  m_HasEmptyBlock = false;
9767  }
9768  }
9769 
9770  IncrementallySortBlocks();
9771  }
9772 
9773  // Destruction of a free Allocation. Deferred until this point, outside of mutex
9774  // lock, for performance reason.
9775  if(pBlockToDelete != VMA_NULL)
9776  {
9777  VMA_DEBUG_LOG(" Deleted empty allocation");
9778  pBlockToDelete->Destroy(m_hAllocator);
9779  vma_delete(m_hAllocator, pBlockToDelete);
9780  }
9781 }
9782 
9783 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
9784 {
9785  // Largest existing block size, scanned from the back with early exit.
9786  VkDeviceSize maxSize = 0;
9787  for(size_t index = m_Blocks.size(); index--; )
9788  {
9789  maxSize = VMA_MAX(maxSize, m_Blocks[index]->m_pMetadata->GetSize());
9790  if(maxSize >= m_PreferredBlockSize)
9791  {
9792  break; // Already at least the preferred size - no need to scan further.
9793  }
9794  }
9795  return maxSize;
9796 }
9796 
9797 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
9798 {
9799  // Erase the given block (matched by pointer identity); it must be present.
9800  for(uint32_t index = 0; index < m_Blocks.size(); ++index)
9801  {
9802  if(m_Blocks[index] == pBlock)
9803  {
9804  VmaVectorRemove(m_Blocks, index);
9805  return;
9806  }
9807  }
9808  VMA_ASSERT(0); // Caller passed a block that is not part of this vector.
9809 }
9809 
9810 void VmaBlockVector::IncrementallySortBlocks()
9811 {
9812  // One bubble-sort pass (ascending by sum of free space), stopping at the
9813  // first out-of-order pair - amortizes sorting cost across many calls.
9814  for(size_t index = 1; index < m_Blocks.size(); ++index)
9815  {
9816  if(m_Blocks[index]->m_pMetadata->GetSumFreeSize() < m_Blocks[index - 1]->m_pMetadata->GetSumFreeSize())
9817  {
9818  VMA_SWAP(m_Blocks[index - 1], m_Blocks[index]);
9819  return;
9820  }
9821  }
9822 }
9822 
9823 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
9824 {
9825  // Allocate a fresh VkDeviceMemory chunk of the requested size and append a
9826  // new block wrapping it at the end of m_Blocks.
9827  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
9828  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
9829  allocInfo.allocationSize = blockSize;
9830  VkDeviceMemory hMemory = VK_NULL_HANDLE;
9831  const VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &hMemory);
9832  if(res < 0)
9833  {
9834  return res;
9835  }
9836 
9837  // Device memory obtained - wrap it in block metadata.
9838  VmaDeviceMemoryBlock* const pNewBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
9839  pNewBlock->Init(
9840  m_hAllocator,
9841  m_MemoryTypeIndex,
9842  hMemory,
9843  allocInfo.allocationSize,
9844  m_NextBlockId++,
9845  m_LinearAlgorithm);
9846 
9847  m_Blocks.push_back(pNewBlock);
9848  if(pNewBlockIndex != VMA_NULL)
9849  {
9850  *pNewBlockIndex = m_Blocks.size() - 1;
9851  }
9852 
9853  return VK_SUCCESS;
9854 }
9855 
9856 #if VMA_STATS_STRING_ENABLED
9857 
9858 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) // Serializes this block vector's configuration and per-block allocation maps as JSON.
9859 {
9860  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex); // Snapshot must be consistent with concurrent Allocate/Free.
9861 
9862  json.BeginObject();
9863 
9864  if(m_IsCustomPool) // Custom pools report their fixed creation parameters.
9865  {
9866  json.WriteString("MemoryTypeIndex");
9867  json.WriteNumber(m_MemoryTypeIndex);
9868 
9869  json.WriteString("BlockSize");
9870  json.WriteNumber(m_PreferredBlockSize);
9871 
9872  json.WriteString("BlockCount");
9873  json.BeginObject(true);
9874  if(m_MinBlockCount > 0) // Min/Max written only when they constrain anything.
9875  {
9876  json.WriteString("Min");
9877  json.WriteNumber((uint64_t)m_MinBlockCount);
9878  }
9879  if(m_MaxBlockCount < SIZE_MAX)
9880  {
9881  json.WriteString("Max");
9882  json.WriteNumber((uint64_t)m_MaxBlockCount);
9883  }
9884  json.WriteString("Cur");
9885  json.WriteNumber((uint64_t)m_Blocks.size());
9886  json.EndObject();
9887 
9888  if(m_FrameInUseCount > 0)
9889  {
9890  json.WriteString("FrameInUseCount");
9891  json.WriteNumber(m_FrameInUseCount);
9892  }
9893 
9894  if(m_LinearAlgorithm)
9895  {
9896  json.WriteString("LinearAlgorithm");
9897  json.WriteBool(true);
9898  }
9899  }
9900  else // Default (per-memory-type) block vector.
9901  {
9902  json.WriteString("PreferredBlockSize");
9903  json.WriteNumber(m_PreferredBlockSize);
9904  }
9905 
9906  json.WriteString("Blocks"); // Map of block ID -> detailed metadata for every block.
9907  json.BeginObject();
9908  for(size_t i = 0; i < m_Blocks.size(); ++i)
9909  {
9910  json.BeginString();
9911  json.ContinueString(m_Blocks[i]->GetId());
9912  json.EndString();
9913 
9914  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
9915  }
9916  json.EndObject();
9917 
9918  json.EndObject();
9919 }
9920 
9921 #endif // #if VMA_STATS_STRING_ENABLED
9922 
9923 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
9924  VmaAllocator hAllocator,
9925  uint32_t currentFrameIndex)
9926 {
9927  // Return the cached defragmentator, constructing it on first use.
9928  if(m_pDefragmentator != VMA_NULL)
9929  {
9930  return m_pDefragmentator;
9931  }
9932  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(hAllocator, this, currentFrameIndex);
9933  return m_pDefragmentator;
9934 }
9937 
9938 VkResult VmaBlockVector::Defragment( // Runs defragmentation, accumulates stats into *pDefragmentationStats, then frees blocks that became empty.
9939  VmaDefragmentationStats* pDefragmentationStats,
9940  VkDeviceSize& maxBytesToMove,
9941  uint32_t& maxAllocationsToMove)
9942 {
9943  if(m_pDefragmentator == VMA_NULL) // No defragmentator was ever requested - nothing to do.
9944  {
9945  return VK_SUCCESS;
9946  }
9947 
9948  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9949 
9950  // Defragment.
9951  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
9952 
9953  // Accumulate statistics.
9954  if(pDefragmentationStats != VMA_NULL)
9955  {
9956  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
9957  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
9958  pDefragmentationStats->bytesMoved += bytesMoved;
9959  pDefragmentationStats->allocationsMoved += allocationsMoved;
9960  VMA_ASSERT(bytesMoved <= maxBytesToMove);
9961  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
9962  maxBytesToMove -= bytesMoved; // In/out budget parameters are decremented for subsequent block vectors.
9963  maxAllocationsToMove -= allocationsMoved;
9964  }
9965 
9966  // Free empty blocks.
9967  m_HasEmptyBlock = false; // Recomputed below: set again if an empty block is retained.
9968  for(size_t blockIndex = m_Blocks.size(); blockIndex--; ) // Backwards so VmaVectorRemove doesn't shift unvisited indices.
9969  {
9970  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
9971  if(pBlock->m_pMetadata->IsEmpty())
9972  {
9973  if(m_Blocks.size() > m_MinBlockCount)
9974  {
9975  if(pDefragmentationStats != VMA_NULL)
9976  {
9977  ++pDefragmentationStats->deviceMemoryBlocksFreed;
9978  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
9979  }
9980 
9981  VmaVectorRemove(m_Blocks, blockIndex);
9982  pBlock->Destroy(m_hAllocator);
9983  vma_delete(m_hAllocator, pBlock);
9984  }
9985  else
9986  {
9987  m_HasEmptyBlock = true; // Kept to honor m_MinBlockCount.
9988  }
9989  }
9990  }
9991 
9992  return result;
9993 }
9994 
9995 void VmaBlockVector::DestroyDefragmentator() // Tear down the lazily-created defragmentator, if any.
9996 {
9997  if(m_pDefragmentator == VMA_NULL)
9998  {
9999  return; // Nothing was ever created.
10000  }
10001  vma_delete(m_hAllocator, m_pDefragmentator);
10002  m_pDefragmentator = VMA_NULL;
10003 }
10003 
10004 void VmaBlockVector::MakePoolAllocationsLost(
10005  uint32_t currentFrameIndex,
10006  size_t* pLostAllocationCount)
10007 {
10008  // Force every eligible allocation in this pool to become lost, optionally
10009  // reporting how many were lost via pLostAllocationCount (may be null).
10010  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10011  size_t lostCount = 0;
10012  for(size_t index = 0; index < m_Blocks.size(); ++index)
10013  {
10014  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
10015  VMA_ASSERT(pBlock);
10016  lostCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
10017  }
10018  if(pLostAllocationCount != VMA_NULL)
10019  {
10020  *pLostAllocationCount = lostCount;
10021  }
10022 }
10021 
10022 VkResult VmaBlockVector::CheckCorruption() // Validates magic values of all blocks; fails fast on the first corrupted one.
10023 {
10024  if(!IsCorruptionDetectionEnabled())
10025  {
10026  return VK_ERROR_FEATURE_NOT_PRESENT;
10027  }
10028 
10029  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10030  for(size_t index = 0; index < m_Blocks.size(); ++index)
10031  {
10032  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
10033  VMA_ASSERT(pBlock);
10034  const VkResult res = pBlock->CheckCorruption(m_hAllocator);
10035  if(res != VK_SUCCESS)
10036  {
10037  return res;
10038  }
10039  }
10040  return VK_SUCCESS;
10041 }
10042 
10043 void VmaBlockVector::AddStats(VmaStats* pStats) // Accumulates per-block statistics into total, per-memory-type and per-heap buckets.
10044 {
10045  const uint32_t typeIndex = m_MemoryTypeIndex;
10046  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(typeIndex);
10047 
10048  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10049 
10050  for(size_t index = 0; index < m_Blocks.size(); ++index)
10051  {
10052  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
10053  VMA_ASSERT(pBlock);
10054  VMA_HEAVY_ASSERT(pBlock->Validate());
10055  VmaStatInfo blockStatInfo;
10056  pBlock->m_pMetadata->CalcAllocationStatInfo(blockStatInfo);
10057  VmaAddStatInfo(pStats->total, blockStatInfo);
10058  VmaAddStatInfo(pStats->memoryType[typeIndex], blockStatInfo);
10059  VmaAddStatInfo(pStats->memoryHeap[heapIndex], blockStatInfo);
10060  }
10061 }
10062 
10064 // VmaDefragmentator members definition
10065 
10066 VmaDefragmentator::VmaDefragmentator( // Per-block-vector defragmentation context; counters start at zero.
10067  VmaAllocator hAllocator,
10068  VmaBlockVector* pBlockVector,
10069  uint32_t currentFrameIndex) :
10070  m_hAllocator(hAllocator),
10071  m_pBlockVector(pBlockVector),
10072  m_CurrentFrameIndex(currentFrameIndex),
10073  m_BytesMoved(0),
10074  m_AllocationsMoved(0),
10075  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
10076  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
10077 {
10078  VMA_ASSERT(!pBlockVector->UsesLinearAlgorithm()); // Defragmentation is not supported for linear-algorithm block vectors.
10079 }
10080 
10081 VmaDefragmentator::~VmaDefragmentator()
10082 {
10083  // Release the per-block bookkeeping objects created during Defragment().
10084  for(size_t index = m_Blocks.size(); index > 0; )
10085  {
10086  vma_delete(m_hAllocator, m_Blocks[--index]);
10087  }
10088 }
10088 
10089 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
10090 {
10091  // Queue an allocation; *pChanged (optional) is raised later if it moves.
10092  AllocationInfo info;
10093  info.m_hAllocation = hAlloc;
10094  info.m_pChanged = pChanged;
10095  m_Allocations.push_back(info);
10096 }
10096 
10097 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
10098 {
10099  // Reuse a mapping made earlier for defragmentation, if any.
10100  if(m_pMappedDataForDefragmentation != VMA_NULL)
10101  {
10102  *ppMappedData = m_pMappedDataForDefragmentation;
10103  return VK_SUCCESS;
10104  }
10105 
10106  // Reuse the block's persistent mapping, if present.
10107  void* const pPersistentlyMapped = m_pBlock->GetMappedData();
10108  if(pPersistentlyMapped != VMA_NULL)
10109  {
10110  *ppMappedData = pPersistentlyMapped;
10111  return VK_SUCCESS;
10112  }
10113 
10114  // Otherwise map now; the matching Unmap() happens in BlockInfo::Unmap().
10115  const VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
10116  *ppMappedData = m_pMappedDataForDefragmentation;
10117  return res;
10118 }
10118 
10119 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
10120 {
10121  // Balances the Map() done in EnsureMapping(). Resetting the cached pointer
10122  // makes a repeated Unmap() harmless (it would otherwise unbalance the
10123  // block's map reference count) and prevents EnsureMapping() from later
10124  // handing out a stale pointer.
10125  if(m_pMappedDataForDefragmentation != VMA_NULL)
10126  {
10127  m_pBlock->Unmap(hAllocator, 1);
10128  m_pMappedDataForDefragmentation = VMA_NULL;
10129  }
10130 }
10126 
10127 VkResult VmaDefragmentator::DefragmentRound( // One pass: moves allocations from "source" (late) blocks into "destination" (early) blocks, within the given budgets.
10128  VkDeviceSize maxBytesToMove,
10129  uint32_t maxAllocationsToMove)
10130 {
10131  if(m_Blocks.empty())
10132  {
10133  return VK_SUCCESS;
10134  }
10135 
10136  size_t srcBlockIndex = m_Blocks.size() - 1;
10137  size_t srcAllocIndex = SIZE_MAX; // SIZE_MAX = "restart at the last allocation of the current block".
10138  for(;;)
10139  {
10140  // 1. Find next allocation to move.
10141  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
10142  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
10143  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
10144  {
10145  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
10146  {
10147  // Finished: no more allocations to process.
10148  if(srcBlockIndex == 0)
10149  {
10150  return VK_SUCCESS;
10151  }
10152  else
10153  {
10154  --srcBlockIndex;
10155  srcAllocIndex = SIZE_MAX;
10156  }
10157  }
10158  else
10159  {
10160  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
10161  }
10162  }
10163 
10164  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
10165  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
10166 
10167  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
10168  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
10169  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
10170  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
10171 
10172  // 2. Try to find new place for this allocation in preceding or current block.
10173  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
10174  {
10175  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
10176  VmaAllocationRequest dstAllocRequest;
10177  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
10178  m_CurrentFrameIndex,
10179  m_pBlockVector->GetFrameInUseCount(),
10180  m_pBlockVector->GetBufferImageGranularity(),
10181  size,
10182  alignment,
10183  false, // upperAddress
10184  suballocType,
10185  false, // canMakeOtherLost
10186  &dstAllocRequest) &&
10187  MoveMakesSense(
10188  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
10189  {
10190  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
10191 
10192  // Reached limit on number of allocations or bytes to move.
10193  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
10194  (m_BytesMoved + size > maxBytesToMove))
10195  {
10196  return VK_INCOMPLETE; // Budget exhausted - caller may continue in a later round.
10197  }
10198 
10199  void* pDstMappedData = VMA_NULL;
10200  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
10201  if(res != VK_SUCCESS)
10202  {
10203  return res;
10204  }
10205 
10206  void* pSrcMappedData = VMA_NULL;
10207  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
10208  if(res != VK_SUCCESS)
10209  {
10210  return res;
10211  }
10212 
10213  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
10214  memcpy(
10215  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
10216  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
10217  static_cast<size_t>(size));
10218 
10219  if(VMA_DEBUG_MARGIN > 0) // Re-stamp magic values around the allocation's new location.
10220  {
10221  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
10222  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
10223  }
10224 
10225  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
10226  dstAllocRequest,
10227  suballocType,
10228  size,
10229  false, // upperAddress
10230  allocInfo.m_hAllocation);
10231  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
10232 
10233  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
10234 
10235  if(allocInfo.m_pChanged != VMA_NULL)
10236  {
10237  *allocInfo.m_pChanged = VK_TRUE;
10238  }
10239 
10240  ++m_AllocationsMoved;
10241  m_BytesMoved += size;
10242 
10243  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
10244 
10245  break;
10246  }
10247  }
10248 
10249  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
10250 
10251  if(srcAllocIndex > 0)
10252  {
10253  --srcAllocIndex;
10254  }
10255  else
10256  {
10257  if(srcBlockIndex > 0)
10258  {
10259  --srcBlockIndex;
10260  srcAllocIndex = SIZE_MAX;
10261  }
10262  else
10263  {
10264  return VK_SUCCESS;
10265  }
10266  }
10267  }
10268 }
10269 
10270 VkResult VmaDefragmentator::Defragment( // Buckets queued allocations per block, orders blocks, then runs up to two DefragmentRound passes.
10271  VkDeviceSize maxBytesToMove,
10272  uint32_t maxAllocationsToMove)
10273 {
10274  if(m_Allocations.empty())
10275  {
10276  return VK_SUCCESS;
10277  }
10278 
10279  // Create block info for each block.
10280  const size_t blockCount = m_pBlockVector->m_Blocks.size();
10281  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10282  {
10283  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
10284  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
10285  m_Blocks.push_back(pBlockInfo);
10286  }
10287 
10288  // Sort them by m_pBlock pointer value.
10289  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess()); // Enables binary search by block pointer below.
10290 
10291  // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
10292  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
10293  {
10294  AllocationInfo& allocInfo = m_Allocations[blockIndex];
10295  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
10296  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
10297  {
10298  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
10299  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
10300  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
10301  {
10302  (*it)->m_Allocations.push_back(allocInfo);
10303  }
10304  else
10305  {
10306  VMA_ASSERT(0); // Every queued allocation must belong to one of this vector's blocks.
10307  }
10308  }
10309  }
10310  m_Allocations.clear();
10311 
10312  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10313  {
10314  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
10315  pBlockInfo->CalcHasNonMovableAllocations();
10316  pBlockInfo->SortAllocationsBySizeDescecnding(); // Largest allocations are moved first by DefragmentRound.
10317  }
10318 
10319  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
10320  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
10321 
10322  // Execute defragmentation rounds (the main part).
10323  VkResult result = VK_SUCCESS;
10324  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
10325  {
10326  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
10327  }
10328 
10329  // Unmap blocks that were mapped for defragmentation.
10330  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10331  {
10332  m_Blocks[blockIndex]->Unmap(m_hAllocator);
10333  }
10334 
10335  return result;
10336 }
10337 
10338 bool VmaDefragmentator::MoveMakesSense(
10339  size_t dstBlockIndex, VkDeviceSize dstOffset,
10340  size_t srcBlockIndex, VkDeviceSize srcOffset)
10341 {
10342  // A move is worthwhile only when it goes strictly "down": to an earlier
10343  // block, or to a lower offset within the same block.
10344  if(dstBlockIndex != srcBlockIndex)
10345  {
10346  return dstBlockIndex < srcBlockIndex;
10347  }
10348  return dstOffset < srcOffset;
10349 }
10356 
10358 // VmaRecorder
10359 
10360 #if VMA_RECORDING_ENABLED
10361 
10362 VmaRecorder::VmaRecorder() : // Inert until Init() opens the output file and captures timer baselines.
10363  m_UseMutex(true),
10364  m_Flags(0),
10365  m_File(VMA_NULL),
10366  m_Freq(INT64_MAX),
10367  m_StartCounter(INT64_MAX)
10368 {
10369 }
10370 
10371 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex) // Opens the CSV recording file and writes its header. Windows-specific (fopen_s, QueryPerformance*).
10372 {
10373  m_UseMutex = useMutex;
10374  m_Flags = settings.flags;
10375 
10376  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq); // Timer ticks per second, for converting counters to seconds.
10377  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter); // Baseline: timestamps are relative to Init().
10378 
10379  // Open file for writing.
10380  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
10381  if(err != 0)
10382  {
10383  return VK_ERROR_INITIALIZATION_FAILED;
10384  }
10385 
10386  // Write header.
10387  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
10388  fprintf(m_File, "%s\n", "1,3"); // Presumably the file format version (major,minor) - confirm against VmaReplay.
10389 
10390  return VK_SUCCESS;
10391 }
10392 
10393 VmaRecorder::~VmaRecorder()
10394 {
10395  // Close the recording file if Init() managed to open one.
10396  if(m_File != VMA_NULL)
10397  {
10398  fclose(m_File);
10399  }
10400 }
10400 
10401 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex) // Appends one CSV line logging a vmaCreateAllocator call.
10402 {
10403  CallParams callParams;
10404  GetBasicParams(callParams); // Fills thread ID and elapsed time since Init().
10405 
10406  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10407  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
10408  Flush();
10409 }
10410 
10411 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex) // Appends one CSV line logging a vmaDestroyAllocator call.
10412 {
10413  CallParams callParams;
10414  GetBasicParams(callParams);
10415 
10416  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10417  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
10418  Flush();
10419 }
10420 
10421 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool) // Logs a vmaCreatePool call with its full create-info and the resulting pool handle.
10422 {
10423  CallParams callParams;
10424  GetBasicParams(callParams);
10425 
10426  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10427  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
10428  createInfo.memoryTypeIndex,
10429  createInfo.flags,
10430  createInfo.blockSize,
10431  createInfo.minBlockCount,
10432  createInfo.maxBlockCount,
10433  createInfo.frameInUseCount,
10434  pool); // Handle recorded as a pointer so replay can correlate later calls.
10435  Flush();
10436 }
10437 
10438 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool) // Logs a vmaDestroyPool call identified by the pool handle.
10439 {
10440  CallParams callParams;
10441  GetBasicParams(callParams);
10442 
10443  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10444  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
10445  pool);
10446  Flush();
10447 }
10448 
10449 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex, // Logs a vmaAllocateMemory call: memory requirements, create-info, resulting handle.
10450  const VkMemoryRequirements& vkMemReq,
10451  const VmaAllocationCreateInfo& createInfo,
10452  VmaAllocation allocation)
10453 {
10454  CallParams callParams;
10455  GetBasicParams(callParams);
10456 
10457  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10458  UserDataString userDataStr(createInfo.flags, createInfo.pUserData); // Formats pUserData as string or pointer depending on flags.
10459  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
10460  vkMemReq.size,
10461  vkMemReq.alignment,
10462  vkMemReq.memoryTypeBits,
10463  createInfo.flags,
10464  createInfo.usage,
10465  createInfo.requiredFlags,
10466  createInfo.preferredFlags,
10467  createInfo.memoryTypeBits,
10468  createInfo.pool,
10469  allocation,
10470  userDataStr.GetString());
10471  Flush();
10472 }
10473 
10474 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
10475  const VkMemoryRequirements& vkMemReq,
10476  bool requiresDedicatedAllocation,
10477  bool prefersDedicatedAllocation,
10478  const VmaAllocationCreateInfo& createInfo,
10479  VmaAllocation allocation)
10480 {
10481  CallParams callParams;
10482  GetBasicParams(callParams);
10483 
10484  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10485  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
10486  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
10487  vkMemReq.size,
10488  vkMemReq.alignment,
10489  vkMemReq.memoryTypeBits,
10490  requiresDedicatedAllocation ? 1 : 0,
10491  prefersDedicatedAllocation ? 1 : 0,
10492  createInfo.flags,
10493  createInfo.usage,
10494  createInfo.requiredFlags,
10495  createInfo.preferredFlags,
10496  createInfo.memoryTypeBits,
10497  createInfo.pool,
10498  allocation,
10499  userDataStr.GetString());
10500  Flush();
10501 }
10502 
10503 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
10504  const VkMemoryRequirements& vkMemReq,
10505  bool requiresDedicatedAllocation,
10506  bool prefersDedicatedAllocation,
10507  const VmaAllocationCreateInfo& createInfo,
10508  VmaAllocation allocation)
10509 {
10510  CallParams callParams;
10511  GetBasicParams(callParams);
10512 
10513  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10514  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
10515  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
10516  vkMemReq.size,
10517  vkMemReq.alignment,
10518  vkMemReq.memoryTypeBits,
10519  requiresDedicatedAllocation ? 1 : 0,
10520  prefersDedicatedAllocation ? 1 : 0,
10521  createInfo.flags,
10522  createInfo.usage,
10523  createInfo.requiredFlags,
10524  createInfo.preferredFlags,
10525  createInfo.memoryTypeBits,
10526  createInfo.pool,
10527  allocation,
10528  userDataStr.GetString());
10529  Flush();
10530 }
10531 
10532 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
10533  VmaAllocation allocation)
10534 {
10535  CallParams callParams;
10536  GetBasicParams(callParams);
10537 
10538  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10539  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
10540  allocation);
10541  Flush();
10542 }
10543 
10544 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
10545  VmaAllocation allocation,
10546  const void* pUserData)
10547 {
10548  CallParams callParams;
10549  GetBasicParams(callParams);
10550 
10551  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10552  UserDataString userDataStr(
10553  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
10554  pUserData);
10555  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
10556  allocation,
10557  userDataStr.GetString());
10558  Flush();
10559 }
10560 
10561 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
10562  VmaAllocation allocation)
10563 {
10564  CallParams callParams;
10565  GetBasicParams(callParams);
10566 
10567  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10568  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
10569  allocation);
10570  Flush();
10571 }
10572 
10573 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
10574  VmaAllocation allocation)
10575 {
10576  CallParams callParams;
10577  GetBasicParams(callParams);
10578 
10579  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10580  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
10581  allocation);
10582  Flush();
10583 }
10584 
10585 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
10586  VmaAllocation allocation)
10587 {
10588  CallParams callParams;
10589  GetBasicParams(callParams);
10590 
10591  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10592  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
10593  allocation);
10594  Flush();
10595 }
10596 
10597 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
10598  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
10599 {
10600  CallParams callParams;
10601  GetBasicParams(callParams);
10602 
10603  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10604  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
10605  allocation,
10606  offset,
10607  size);
10608  Flush();
10609 }
10610 
10611 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
10612  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
10613 {
10614  CallParams callParams;
10615  GetBasicParams(callParams);
10616 
10617  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10618  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
10619  allocation,
10620  offset,
10621  size);
10622  Flush();
10623 }
10624 
10625 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
10626  const VkBufferCreateInfo& bufCreateInfo,
10627  const VmaAllocationCreateInfo& allocCreateInfo,
10628  VmaAllocation allocation)
10629 {
10630  CallParams callParams;
10631  GetBasicParams(callParams);
10632 
10633  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10634  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
10635  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
10636  bufCreateInfo.flags,
10637  bufCreateInfo.size,
10638  bufCreateInfo.usage,
10639  bufCreateInfo.sharingMode,
10640  allocCreateInfo.flags,
10641  allocCreateInfo.usage,
10642  allocCreateInfo.requiredFlags,
10643  allocCreateInfo.preferredFlags,
10644  allocCreateInfo.memoryTypeBits,
10645  allocCreateInfo.pool,
10646  allocation,
10647  userDataStr.GetString());
10648  Flush();
10649 }
10650 
10651 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
10652  const VkImageCreateInfo& imageCreateInfo,
10653  const VmaAllocationCreateInfo& allocCreateInfo,
10654  VmaAllocation allocation)
10655 {
10656  CallParams callParams;
10657  GetBasicParams(callParams);
10658 
10659  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10660  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
10661  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
10662  imageCreateInfo.flags,
10663  imageCreateInfo.imageType,
10664  imageCreateInfo.format,
10665  imageCreateInfo.extent.width,
10666  imageCreateInfo.extent.height,
10667  imageCreateInfo.extent.depth,
10668  imageCreateInfo.mipLevels,
10669  imageCreateInfo.arrayLayers,
10670  imageCreateInfo.samples,
10671  imageCreateInfo.tiling,
10672  imageCreateInfo.usage,
10673  imageCreateInfo.sharingMode,
10674  imageCreateInfo.initialLayout,
10675  allocCreateInfo.flags,
10676  allocCreateInfo.usage,
10677  allocCreateInfo.requiredFlags,
10678  allocCreateInfo.preferredFlags,
10679  allocCreateInfo.memoryTypeBits,
10680  allocCreateInfo.pool,
10681  allocation,
10682  userDataStr.GetString());
10683  Flush();
10684 }
10685 
10686 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
10687  VmaAllocation allocation)
10688 {
10689  CallParams callParams;
10690  GetBasicParams(callParams);
10691 
10692  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10693  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
10694  allocation);
10695  Flush();
10696 }
10697 
10698 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
10699  VmaAllocation allocation)
10700 {
10701  CallParams callParams;
10702  GetBasicParams(callParams);
10703 
10704  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10705  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
10706  allocation);
10707  Flush();
10708 }
10709 
10710 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
10711  VmaAllocation allocation)
10712 {
10713  CallParams callParams;
10714  GetBasicParams(callParams);
10715 
10716  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10717  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
10718  allocation);
10719  Flush();
10720 }
10721 
10722 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
10723  VmaAllocation allocation)
10724 {
10725  CallParams callParams;
10726  GetBasicParams(callParams);
10727 
10728  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10729  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
10730  allocation);
10731  Flush();
10732 }
10733 
10734 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
10735  VmaPool pool)
10736 {
10737  CallParams callParams;
10738  GetBasicParams(callParams);
10739 
10740  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10741  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
10742  pool);
10743  Flush();
10744 }
10745 
10746 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
10747 {
10748  if(pUserData != VMA_NULL)
10749  {
10750  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
10751  {
10752  m_Str = (const char*)pUserData;
10753  }
10754  else
10755  {
10756  sprintf_s(m_PtrStr, "%p", pUserData);
10757  m_Str = m_PtrStr;
10758  }
10759  }
10760  else
10761  {
10762  m_Str = "";
10763  }
10764 }
10765 
10766 void VmaRecorder::WriteConfiguration(
10767  const VkPhysicalDeviceProperties& devProps,
10768  const VkPhysicalDeviceMemoryProperties& memProps,
10769  bool dedicatedAllocationExtensionEnabled)
10770 {
10771  fprintf(m_File, "Config,Begin\n");
10772 
10773  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
10774  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
10775  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
10776  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
10777  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
10778  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
10779 
10780  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
10781  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
10782  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
10783 
10784  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
10785  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
10786  {
10787  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
10788  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
10789  }
10790  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
10791  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
10792  {
10793  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
10794  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
10795  }
10796 
10797  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
10798 
10799  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
10800  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
10801  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
10802  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
10803  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
10804  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
10805  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
10806  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
10807  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
10808 
10809  fprintf(m_File, "Config,End\n");
10810 }
10811 
10812 void VmaRecorder::GetBasicParams(CallParams& outParams)
10813 {
10814  outParams.threadId = GetCurrentThreadId();
10815 
10816  LARGE_INTEGER counter;
10817  QueryPerformanceCounter(&counter);
10818  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
10819 }
10820 
10821 void VmaRecorder::Flush()
10822 {
10823  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
10824  {
10825  fflush(m_File);
10826  }
10827 }
10828 
10829 #endif // #if VMA_RECORDING_ENABLED
10830 
10832 // VmaAllocator_T
10833 
10834 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
10835  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
10836  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
10837  m_hDevice(pCreateInfo->device),
10838  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
10839  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
10840  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
10841  m_PreferredLargeHeapBlockSize(0),
10842  m_PhysicalDevice(pCreateInfo->physicalDevice),
10843  m_CurrentFrameIndex(0),
10844  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
10845  m_NextPoolId(0)
10847  ,m_pRecorder(VMA_NULL)
10848 #endif
10849 {
10850  if(VMA_DEBUG_DETECT_CORRUPTION)
10851  {
10852  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
10853  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
10854  }
10855 
10856  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
10857 
10858 #if !(VMA_DEDICATED_ALLOCATION)
10860  {
10861  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
10862  }
10863 #endif
10864 
10865  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
10866  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
10867  memset(&m_MemProps, 0, sizeof(m_MemProps));
10868 
10869  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
10870  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
10871 
10872  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
10873  {
10874  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
10875  }
10876 
10877  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
10878  {
10879  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
10880  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
10881  }
10882 
10883  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
10884 
10885  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
10886  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
10887 
10888  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
10889  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
10890 
10891  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
10892  {
10893  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
10894  {
10895  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
10896  if(limit != VK_WHOLE_SIZE)
10897  {
10898  m_HeapSizeLimit[heapIndex] = limit;
10899  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
10900  {
10901  m_MemProps.memoryHeaps[heapIndex].size = limit;
10902  }
10903  }
10904  }
10905  }
10906 
10907  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
10908  {
10909  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
10910 
10911  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
10912  this,
10913  memTypeIndex,
10914  preferredBlockSize,
10915  0,
10916  SIZE_MAX,
10917  GetBufferImageGranularity(),
10918  pCreateInfo->frameInUseCount,
10919  false, // isCustomPool
10920  false); // linearAlgorithm
10921  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
10922  // becase minBlockCount is 0.
10923  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
10924 
10925  }
10926 }
10927 
10928 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
10929 {
10930  VkResult res = VK_SUCCESS;
10931 
10932  if(pCreateInfo->pRecordSettings != VMA_NULL &&
10933  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
10934  {
10935 #if VMA_RECORDING_ENABLED
10936  m_pRecorder = vma_new(this, VmaRecorder)();
10937  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
10938  if(res != VK_SUCCESS)
10939  {
10940  return res;
10941  }
10942  m_pRecorder->WriteConfiguration(
10943  m_PhysicalDeviceProperties,
10944  m_MemProps,
10945  m_UseKhrDedicatedAllocation);
10946  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
10947 #else
10948  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
10949  return VK_ERROR_FEATURE_NOT_PRESENT;
10950 #endif
10951  }
10952 
10953  return res;
10954 }
10955 
10956 VmaAllocator_T::~VmaAllocator_T()
10957 {
10958 #if VMA_RECORDING_ENABLED
10959  if(m_pRecorder != VMA_NULL)
10960  {
10961  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
10962  vma_delete(this, m_pRecorder);
10963  }
10964 #endif
10965 
10966  VMA_ASSERT(m_Pools.empty());
10967 
10968  for(size_t i = GetMemoryTypeCount(); i--; )
10969  {
10970  vma_delete(this, m_pDedicatedAllocations[i]);
10971  vma_delete(this, m_pBlockVectors[i]);
10972  }
10973 }
10974 
10975 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
10976 {
10977 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
10978  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
10979  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
10980  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
10981  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
10982  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
10983  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
10984  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
10985  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
10986  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
10987  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
10988  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
10989  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
10990  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
10991  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
10992  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
10993  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
10994 #if VMA_DEDICATED_ALLOCATION
10995  if(m_UseKhrDedicatedAllocation)
10996  {
10997  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
10998  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
10999  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
11000  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
11001  }
11002 #endif // #if VMA_DEDICATED_ALLOCATION
11003 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
11004 
11005 #define VMA_COPY_IF_NOT_NULL(funcName) \
11006  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
11007 
11008  if(pVulkanFunctions != VMA_NULL)
11009  {
11010  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
11011  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
11012  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
11013  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
11014  VMA_COPY_IF_NOT_NULL(vkMapMemory);
11015  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
11016  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
11017  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
11018  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
11019  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
11020  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
11021  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
11022  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
11023  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
11024  VMA_COPY_IF_NOT_NULL(vkCreateImage);
11025  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
11026 #if VMA_DEDICATED_ALLOCATION
11027  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
11028  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
11029 #endif
11030  }
11031 
11032 #undef VMA_COPY_IF_NOT_NULL
11033 
11034  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
11035  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
11036  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
11037  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
11038  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
11039  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
11040  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
11041  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
11042  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
11043  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
11044  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
11045  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
11046  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
11047  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
11048  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
11049  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
11050  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
11051  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
11052 #if VMA_DEDICATED_ALLOCATION
11053  if(m_UseKhrDedicatedAllocation)
11054  {
11055  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
11056  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
11057  }
11058 #endif
11059 }
11060 
11061 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
11062 {
11063  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
11064  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
11065  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
11066  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
11067 }
11068 
11069 VkResult VmaAllocator_T::AllocateMemoryOfType(
11070  VkDeviceSize size,
11071  VkDeviceSize alignment,
11072  bool dedicatedAllocation,
11073  VkBuffer dedicatedBuffer,
11074  VkImage dedicatedImage,
11075  const VmaAllocationCreateInfo& createInfo,
11076  uint32_t memTypeIndex,
11077  VmaSuballocationType suballocType,
11078  VmaAllocation* pAllocation)
11079 {
11080  VMA_ASSERT(pAllocation != VMA_NULL);
11081  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
11082 
11083  VmaAllocationCreateInfo finalCreateInfo = createInfo;
11084 
11085  // If memory type is not HOST_VISIBLE, disable MAPPED.
11086  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
11087  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
11088  {
11089  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
11090  }
11091 
11092  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
11093  VMA_ASSERT(blockVector);
11094 
11095  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
11096  bool preferDedicatedMemory =
11097  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
11098  dedicatedAllocation ||
11099  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
11100  size > preferredBlockSize / 2;
11101 
11102  if(preferDedicatedMemory &&
11103  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
11104  finalCreateInfo.pool == VK_NULL_HANDLE)
11105  {
11107  }
11108 
11109  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
11110  {
11111  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11112  {
11113  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11114  }
11115  else
11116  {
11117  return AllocateDedicatedMemory(
11118  size,
11119  suballocType,
11120  memTypeIndex,
11121  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
11122  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
11123  finalCreateInfo.pUserData,
11124  dedicatedBuffer,
11125  dedicatedImage,
11126  pAllocation);
11127  }
11128  }
11129  else
11130  {
11131  VkResult res = blockVector->Allocate(
11132  VK_NULL_HANDLE, // hCurrentPool
11133  m_CurrentFrameIndex.load(),
11134  size,
11135  alignment,
11136  finalCreateInfo,
11137  suballocType,
11138  pAllocation);
11139  if(res == VK_SUCCESS)
11140  {
11141  return res;
11142  }
11143 
11144  // 5. Try dedicated memory.
11145  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11146  {
11147  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11148  }
11149  else
11150  {
11151  res = AllocateDedicatedMemory(
11152  size,
11153  suballocType,
11154  memTypeIndex,
11155  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
11156  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
11157  finalCreateInfo.pUserData,
11158  dedicatedBuffer,
11159  dedicatedImage,
11160  pAllocation);
11161  if(res == VK_SUCCESS)
11162  {
11163  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
11164  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
11165  return VK_SUCCESS;
11166  }
11167  else
11168  {
11169  // Everything failed: Return error code.
11170  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
11171  return res;
11172  }
11173  }
11174  }
11175 }
11176 
11177 VkResult VmaAllocator_T::AllocateDedicatedMemory(
11178  VkDeviceSize size,
11179  VmaSuballocationType suballocType,
11180  uint32_t memTypeIndex,
11181  bool map,
11182  bool isUserDataString,
11183  void* pUserData,
11184  VkBuffer dedicatedBuffer,
11185  VkImage dedicatedImage,
11186  VmaAllocation* pAllocation)
11187 {
11188  VMA_ASSERT(pAllocation);
11189 
11190  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11191  allocInfo.memoryTypeIndex = memTypeIndex;
11192  allocInfo.allocationSize = size;
11193 
11194 #if VMA_DEDICATED_ALLOCATION
11195  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
11196  if(m_UseKhrDedicatedAllocation)
11197  {
11198  if(dedicatedBuffer != VK_NULL_HANDLE)
11199  {
11200  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
11201  dedicatedAllocInfo.buffer = dedicatedBuffer;
11202  allocInfo.pNext = &dedicatedAllocInfo;
11203  }
11204  else if(dedicatedImage != VK_NULL_HANDLE)
11205  {
11206  dedicatedAllocInfo.image = dedicatedImage;
11207  allocInfo.pNext = &dedicatedAllocInfo;
11208  }
11209  }
11210 #endif // #if VMA_DEDICATED_ALLOCATION
11211 
11212  // Allocate VkDeviceMemory.
11213  VkDeviceMemory hMemory = VK_NULL_HANDLE;
11214  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
11215  if(res < 0)
11216  {
11217  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
11218  return res;
11219  }
11220 
11221  void* pMappedData = VMA_NULL;
11222  if(map)
11223  {
11224  res = (*m_VulkanFunctions.vkMapMemory)(
11225  m_hDevice,
11226  hMemory,
11227  0,
11228  VK_WHOLE_SIZE,
11229  0,
11230  &pMappedData);
11231  if(res < 0)
11232  {
11233  VMA_DEBUG_LOG(" vkMapMemory FAILED");
11234  FreeVulkanMemory(memTypeIndex, size, hMemory);
11235  return res;
11236  }
11237  }
11238 
11239  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
11240  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
11241  (*pAllocation)->SetUserData(this, pUserData);
11242  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11243  {
11244  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11245  }
11246 
11247  // Register it in m_pDedicatedAllocations.
11248  {
11249  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
11250  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
11251  VMA_ASSERT(pDedicatedAllocations);
11252  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
11253  }
11254 
11255  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
11256 
11257  return VK_SUCCESS;
11258 }
11259 
11260 void VmaAllocator_T::GetBufferMemoryRequirements(
11261  VkBuffer hBuffer,
11262  VkMemoryRequirements& memReq,
11263  bool& requiresDedicatedAllocation,
11264  bool& prefersDedicatedAllocation) const
11265 {
11266 #if VMA_DEDICATED_ALLOCATION
11267  if(m_UseKhrDedicatedAllocation)
11268  {
11269  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
11270  memReqInfo.buffer = hBuffer;
11271 
11272  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
11273 
11274  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
11275  memReq2.pNext = &memDedicatedReq;
11276 
11277  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
11278 
11279  memReq = memReq2.memoryRequirements;
11280  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
11281  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
11282  }
11283  else
11284 #endif // #if VMA_DEDICATED_ALLOCATION
11285  {
11286  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
11287  requiresDedicatedAllocation = false;
11288  prefersDedicatedAllocation = false;
11289  }
11290 }
11291 
11292 void VmaAllocator_T::GetImageMemoryRequirements(
11293  VkImage hImage,
11294  VkMemoryRequirements& memReq,
11295  bool& requiresDedicatedAllocation,
11296  bool& prefersDedicatedAllocation) const
11297 {
11298 #if VMA_DEDICATED_ALLOCATION
11299  if(m_UseKhrDedicatedAllocation)
11300  {
11301  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
11302  memReqInfo.image = hImage;
11303 
11304  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
11305 
11306  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
11307  memReq2.pNext = &memDedicatedReq;
11308 
11309  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
11310 
11311  memReq = memReq2.memoryRequirements;
11312  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
11313  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
11314  }
11315  else
11316 #endif // #if VMA_DEDICATED_ALLOCATION
11317  {
11318  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
11319  requiresDedicatedAllocation = false;
11320  prefersDedicatedAllocation = false;
11321  }
11322 }
11323 
11324 VkResult VmaAllocator_T::AllocateMemory(
11325  const VkMemoryRequirements& vkMemReq,
11326  bool requiresDedicatedAllocation,
11327  bool prefersDedicatedAllocation,
11328  VkBuffer dedicatedBuffer,
11329  VkImage dedicatedImage,
11330  const VmaAllocationCreateInfo& createInfo,
11331  VmaSuballocationType suballocType,
11332  VmaAllocation* pAllocation)
11333 {
11334  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
11335  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11336  {
11337  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
11338  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11339  }
11340  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
11342  {
11343  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
11344  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11345  }
11346  if(requiresDedicatedAllocation)
11347  {
11348  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11349  {
11350  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
11351  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11352  }
11353  if(createInfo.pool != VK_NULL_HANDLE)
11354  {
11355  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
11356  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11357  }
11358  }
11359  if((createInfo.pool != VK_NULL_HANDLE) &&
11360  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
11361  {
11362  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
11363  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11364  }
11365 
11366  if(createInfo.pool != VK_NULL_HANDLE)
11367  {
11368  const VkDeviceSize alignmentForPool = VMA_MAX(
11369  vkMemReq.alignment,
11370  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
11371  return createInfo.pool->m_BlockVector.Allocate(
11372  createInfo.pool,
11373  m_CurrentFrameIndex.load(),
11374  vkMemReq.size,
11375  alignmentForPool,
11376  createInfo,
11377  suballocType,
11378  pAllocation);
11379  }
11380  else
11381  {
11382  // Bit mask of memory Vulkan types acceptable for this allocation.
11383  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
11384  uint32_t memTypeIndex = UINT32_MAX;
11385  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
11386  if(res == VK_SUCCESS)
11387  {
11388  VkDeviceSize alignmentForMemType = VMA_MAX(
11389  vkMemReq.alignment,
11390  GetMemoryTypeMinAlignment(memTypeIndex));
11391 
11392  res = AllocateMemoryOfType(
11393  vkMemReq.size,
11394  alignmentForMemType,
11395  requiresDedicatedAllocation || prefersDedicatedAllocation,
11396  dedicatedBuffer,
11397  dedicatedImage,
11398  createInfo,
11399  memTypeIndex,
11400  suballocType,
11401  pAllocation);
11402  // Succeeded on first try.
11403  if(res == VK_SUCCESS)
11404  {
11405  return res;
11406  }
11407  // Allocation from this memory type failed. Try other compatible memory types.
11408  else
11409  {
11410  for(;;)
11411  {
11412  // Remove old memTypeIndex from list of possibilities.
11413  memoryTypeBits &= ~(1u << memTypeIndex);
11414  // Find alternative memTypeIndex.
11415  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
11416  if(res == VK_SUCCESS)
11417  {
11418  alignmentForMemType = VMA_MAX(
11419  vkMemReq.alignment,
11420  GetMemoryTypeMinAlignment(memTypeIndex));
11421 
11422  res = AllocateMemoryOfType(
11423  vkMemReq.size,
11424  alignmentForMemType,
11425  requiresDedicatedAllocation || prefersDedicatedAllocation,
11426  dedicatedBuffer,
11427  dedicatedImage,
11428  createInfo,
11429  memTypeIndex,
11430  suballocType,
11431  pAllocation);
11432  // Allocation from this alternative memory type succeeded.
11433  if(res == VK_SUCCESS)
11434  {
11435  return res;
11436  }
11437  // else: Allocation from this memory type failed. Try next one - next loop iteration.
11438  }
11439  // No other matching memory type index could be found.
11440  else
11441  {
11442  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
11443  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11444  }
11445  }
11446  }
11447  }
11448  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
11449  else
11450  return res;
11451  }
11452 }
11453 
11454 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
11455 {
11456  VMA_ASSERT(allocation);
11457 
11458  if(allocation->CanBecomeLost() == false ||
11459  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11460  {
11461  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11462  {
11463  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
11464  }
11465 
11466  switch(allocation->GetType())
11467  {
11468  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
11469  {
11470  VmaBlockVector* pBlockVector = VMA_NULL;
11471  VmaPool hPool = allocation->GetPool();
11472  if(hPool != VK_NULL_HANDLE)
11473  {
11474  pBlockVector = &hPool->m_BlockVector;
11475  }
11476  else
11477  {
11478  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
11479  pBlockVector = m_pBlockVectors[memTypeIndex];
11480  }
11481  pBlockVector->Free(allocation);
11482  }
11483  break;
11484  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
11485  FreeDedicatedMemory(allocation);
11486  break;
11487  default:
11488  VMA_ASSERT(0);
11489  }
11490  }
11491 
11492  allocation->SetUserData(this, VMA_NULL);
11493  vma_delete(this, allocation);
11494 }
11495 
11496 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
11497 {
11498  // Initialize.
11499  InitStatInfo(pStats->total);
11500  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
11501  InitStatInfo(pStats->memoryType[i]);
11502  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
11503  InitStatInfo(pStats->memoryHeap[i]);
11504 
11505  // Process default pools.
11506  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11507  {
11508  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
11509  VMA_ASSERT(pBlockVector);
11510  pBlockVector->AddStats(pStats);
11511  }
11512 
11513  // Process custom pools.
11514  {
11515  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11516  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
11517  {
11518  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
11519  }
11520  }
11521 
11522  // Process dedicated allocations.
11523  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11524  {
11525  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
11526  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
11527  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
11528  VMA_ASSERT(pDedicatedAllocVector);
11529  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
11530  {
11531  VmaStatInfo allocationStatInfo;
11532  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
11533  VmaAddStatInfo(pStats->total, allocationStatInfo);
11534  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11535  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11536  }
11537  }
11538 
11539  // Postprocess.
11540  VmaPostprocessCalcStatInfo(pStats->total);
11541  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
11542  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
11543  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
11544  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
11545 }
11546 
// PCI vendor ID of AMD: 4098 == 0x1002.
// NOTE(review): not referenced in this part of the file - presumably used
// elsewhere for vendor-specific behavior; verify against the rest of the file.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
11548 
11549 VkResult VmaAllocator_T::Defragment(
11550  VmaAllocation* pAllocations,
11551  size_t allocationCount,
11552  VkBool32* pAllocationsChanged,
11553  const VmaDefragmentationInfo* pDefragmentationInfo,
11554  VmaDefragmentationStats* pDefragmentationStats)
11555 {
11556  if(pAllocationsChanged != VMA_NULL)
11557  {
11558  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
11559  }
11560  if(pDefragmentationStats != VMA_NULL)
11561  {
11562  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
11563  }
11564 
11565  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
11566 
11567  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
11568 
11569  const size_t poolCount = m_Pools.size();
11570 
11571  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
11572  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11573  {
11574  VmaAllocation hAlloc = pAllocations[allocIndex];
11575  VMA_ASSERT(hAlloc);
11576  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
11577  // DedicatedAlloc cannot be defragmented.
11578  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11579  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
11580  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
11581  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
11582  // Lost allocation cannot be defragmented.
11583  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
11584  {
11585  VmaBlockVector* pAllocBlockVector = VMA_NULL;
11586 
11587  const VmaPool hAllocPool = hAlloc->GetPool();
11588  // This allocation belongs to custom pool.
11589  if(hAllocPool != VK_NULL_HANDLE)
11590  {
11591  // Pools with linear algorithm are not defragmented.
11592  if(!hAllocPool->m_BlockVector.UsesLinearAlgorithm())
11593  {
11594  pAllocBlockVector = &hAllocPool->m_BlockVector;
11595  }
11596  }
11597  // This allocation belongs to general pool.
11598  else
11599  {
11600  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
11601  }
11602 
11603  if(pAllocBlockVector != VMA_NULL)
11604  {
11605  VmaDefragmentator* const pDefragmentator =
11606  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
11607  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
11608  &pAllocationsChanged[allocIndex] : VMA_NULL;
11609  pDefragmentator->AddAllocation(hAlloc, pChanged);
11610  }
11611  }
11612  }
11613 
11614  VkResult result = VK_SUCCESS;
11615 
11616  // ======== Main processing.
11617 
11618  VkDeviceSize maxBytesToMove = SIZE_MAX;
11619  uint32_t maxAllocationsToMove = UINT32_MAX;
11620  if(pDefragmentationInfo != VMA_NULL)
11621  {
11622  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
11623  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
11624  }
11625 
11626  // Process standard memory.
11627  for(uint32_t memTypeIndex = 0;
11628  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
11629  ++memTypeIndex)
11630  {
11631  // Only HOST_VISIBLE memory types can be defragmented.
11632  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
11633  {
11634  result = m_pBlockVectors[memTypeIndex]->Defragment(
11635  pDefragmentationStats,
11636  maxBytesToMove,
11637  maxAllocationsToMove);
11638  }
11639  }
11640 
11641  // Process custom pools.
11642  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
11643  {
11644  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
11645  pDefragmentationStats,
11646  maxBytesToMove,
11647  maxAllocationsToMove);
11648  }
11649 
11650  // ======== Destroy defragmentators.
11651 
11652  // Process custom pools.
11653  for(size_t poolIndex = poolCount; poolIndex--; )
11654  {
11655  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
11656  }
11657 
11658  // Process standard memory.
11659  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
11660  {
11661  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
11662  {
11663  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
11664  }
11665  }
11666 
11667  return result;
11668 }
11669 
// Fills pAllocationInfo with the current parameters of hAllocation.
// For allocations that can become lost this also "touches" the allocation,
// advancing its last-use frame index to the current frame via a lock-free
// compare-exchange loop.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation has been lost: report its size and user data,
                // but no backing memory.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                // Allocations that can become lost are never mapped
                // (see Map(), which rejects them).
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report current parameters.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use frame index; on contention the
                // loop retries with the value another thread installed.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Touch the allocation for statistics purposes only; it can never be
        // lost, so the loop just brings the last-use index up to date.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
11741 
// "Touches" hAllocation: marks it as used in the current frame so it does not
// become lost. Returns false if the allocation is already lost, true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        // Lock-free compare-exchange loop: advance the allocation's last-use
        // frame index to the current frame, retrying on contention.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Already lost - cannot be revived.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Successfully marked as used in the current frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics-only touch; the allocation can never be lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
11793 
11794 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
11795 {
11796  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
11797 
11798  const bool isLinearAlgorithm = (pCreateInfo->flags & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) != 0;
11799 
11800  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
11801 
11802  if(newCreateInfo.maxBlockCount == 0)
11803  {
11804  newCreateInfo.maxBlockCount = isLinearAlgorithm ? 1 : SIZE_MAX;
11805  }
11806  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount ||
11807  isLinearAlgorithm && newCreateInfo.maxBlockCount > 1)
11808  {
11809  return VK_ERROR_INITIALIZATION_FAILED;
11810  }
11811  if(newCreateInfo.blockSize == 0)
11812  {
11813  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
11814  }
11815 
11816  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
11817 
11818  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
11819  if(res != VK_SUCCESS)
11820  {
11821  vma_delete(this, *pPool);
11822  *pPool = VMA_NULL;
11823  return res;
11824  }
11825 
11826  // Add to m_Pools.
11827  {
11828  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11829  (*pPool)->SetId(m_NextPoolId++);
11830  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
11831  }
11832 
11833  return VK_SUCCESS;
11834 }
11835 
11836 void VmaAllocator_T::DestroyPool(VmaPool pool)
11837 {
11838  // Remove from m_Pools.
11839  {
11840  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11841  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
11842  VMA_ASSERT(success && "Pool not found in Allocator.");
11843  }
11844 
11845  vma_delete(this, pool);
11846 }
11847 
11848 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
11849 {
11850  pool->m_BlockVector.GetPoolStats(pPoolStats);
11851 }
11852 
// Records the application's current frame index, which the lost-allocation
// machinery compares against each allocation's last-use frame index.
// m_CurrentFrameIndex is accessed via load()/store(), so this is safe to call
// concurrently with allocations.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
11857 
11858 void VmaAllocator_T::MakePoolAllocationsLost(
11859  VmaPool hPool,
11860  size_t* pLostAllocationCount)
11861 {
11862  hPool->m_BlockVector.MakePoolAllocationsLost(
11863  m_CurrentFrameIndex.load(),
11864  pLostAllocationCount);
11865 }
11866 
11867 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
11868 {
11869  return hPool->m_BlockVector.CheckCorruption();
11870 }
11871 
11872 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
11873 {
11874  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
11875 
11876  // Process default pools.
11877  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11878  {
11879  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
11880  {
11881  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
11882  VMA_ASSERT(pBlockVector);
11883  VkResult localRes = pBlockVector->CheckCorruption();
11884  switch(localRes)
11885  {
11886  case VK_ERROR_FEATURE_NOT_PRESENT:
11887  break;
11888  case VK_SUCCESS:
11889  finalRes = VK_SUCCESS;
11890  break;
11891  default:
11892  return localRes;
11893  }
11894  }
11895  }
11896 
11897  // Process custom pools.
11898  {
11899  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11900  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
11901  {
11902  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
11903  {
11904  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
11905  switch(localRes)
11906  {
11907  case VK_ERROR_FEATURE_NOT_PRESENT:
11908  break;
11909  case VK_SUCCESS:
11910  finalRes = VK_SUCCESS;
11911  break;
11912  default:
11913  return localRes;
11914  }
11915  }
11916  }
11917  }
11918 
11919  return finalRes;
11920 }
11921 
11922 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
11923 {
11924  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
11925  (*pAllocation)->InitLost();
11926 }
11927 
// Allocates VkDeviceMemory, honoring the per-heap budget in m_HeapSizeLimit
// and invoking the user's pfnAllocate device-memory callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means no limit is set for this heap.
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // The budget check, the allocation, and the budget update must happen
        // atomically with respect to other allocating/freeing threads.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                // Charge the allocated size against the remaining budget.
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Allocation would exceed the user-imposed heap size limit.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Notify the user's device-memory callback only after a real allocation.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
11961 
// Frees VkDeviceMemory previously obtained via AllocateVulkanMemory:
// notifies the user's pfnFree callback first (while the memory is still
// valid), frees it, then returns the size to the per-heap budget if a heap
// size limit is active.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    // VK_WHOLE_SIZE in m_HeapSizeLimit means no limit is set for this heap.
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
11978 
11979 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
11980 {
11981  if(hAllocation->CanBecomeLost())
11982  {
11983  return VK_ERROR_MEMORY_MAP_FAILED;
11984  }
11985 
11986  switch(hAllocation->GetType())
11987  {
11988  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
11989  {
11990  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
11991  char *pBytes = VMA_NULL;
11992  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
11993  if(res == VK_SUCCESS)
11994  {
11995  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
11996  hAllocation->BlockAllocMap();
11997  }
11998  return res;
11999  }
12000  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12001  return hAllocation->DedicatedAllocMap(this, ppData);
12002  default:
12003  VMA_ASSERT(0);
12004  return VK_ERROR_MEMORY_MAP_FAILED;
12005  }
12006 }
12007 
12008 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
12009 {
12010  switch(hAllocation->GetType())
12011  {
12012  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12013  {
12014  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12015  hAllocation->BlockAllocUnmap();
12016  pBlock->Unmap(this, 1);
12017  }
12018  break;
12019  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12020  hAllocation->DedicatedAllocUnmap(this);
12021  break;
12022  default:
12023  VMA_ASSERT(0);
12024  }
12025 }
12026 
12027 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
12028 {
12029  VkResult res = VK_SUCCESS;
12030  switch(hAllocation->GetType())
12031  {
12032  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12033  res = GetVulkanFunctions().vkBindBufferMemory(
12034  m_hDevice,
12035  hBuffer,
12036  hAllocation->GetMemory(),
12037  0); //memoryOffset
12038  break;
12039  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12040  {
12041  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12042  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
12043  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
12044  break;
12045  }
12046  default:
12047  VMA_ASSERT(0);
12048  }
12049  return res;
12050 }
12051 
12052 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
12053 {
12054  VkResult res = VK_SUCCESS;
12055  switch(hAllocation->GetType())
12056  {
12057  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12058  res = GetVulkanFunctions().vkBindImageMemory(
12059  m_hDevice,
12060  hImage,
12061  hAllocation->GetMemory(),
12062  0); //memoryOffset
12063  break;
12064  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12065  {
12066  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12067  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
12068  res = pBlock->BindImageMemory(this, hAllocation, hImage);
12069  break;
12070  }
12071  default:
12072  VMA_ASSERT(0);
12073  }
12074  return res;
12075 }
12076 
// Flushes or invalidates the given [offset, offset+size) range of hAllocation
// on non-coherent memory types. Ranges are expanded to multiples of
// nonCoherentAtomSize as required by the Vulkan spec, and clamped to the
// allocation (dedicated) or the whole block (sub-allocation).
// For coherent memory types the call is a no-op.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align the start down to an atom boundary; the size grows by the
            // same amount and is then aligned up, clamped to the allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // The allocation's offset inside the block is itself atom-aligned,
            // so translating the range preserves alignment; clamp the size to
            // the block's end.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
12152 
// Releases the VkDeviceMemory backing a dedicated allocation: unregisters it
// from m_pDedicatedAllocations, unmaps it if still mapped, then frees the
// memory. Does not destroy the VmaAllocation object itself (FreeMemory does).
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Lock only for the unregistration; the actual free happens outside.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    // A dedicated allocation may still be persistently mapped; unmap before
    // freeing the memory.
    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG("    Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
12177 
12178 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
12179 {
12180  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
12181  !hAllocation->CanBecomeLost() &&
12182  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12183  {
12184  void* pData = VMA_NULL;
12185  VkResult res = Map(hAllocation, &pData);
12186  if(res == VK_SUCCESS)
12187  {
12188  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
12189  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
12190  Unmap(hAllocation);
12191  }
12192  else
12193  {
12194  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
12195  }
12196  }
12197 }
12198 
12199 #if VMA_STATS_STRING_ENABLED
12200 
// Writes a detailed JSON description of the allocator's state into json:
// first dedicated allocations per memory type ("DedicatedAllocations"),
// then non-empty default block vectors ("DefaultPools"),
// then custom pools keyed by their id ("Pools").
// Sections are emitted only if they have content, so the output stays compact.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // "DedicatedAllocations" object is opened lazily, on the first memory
    // type that actually has dedicated allocations.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Key: "Type <memTypeIndex>".
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // "DefaultPools": the per-memory-type block vectors, skipping empty ones.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                // Key: "Type <memTypeIndex>".
                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Key: the pool's numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
12286 
12287 #endif // #if VMA_STATS_STRING_ENABLED
12288 
12290 // Public interface
12291 
12292 VkResult vmaCreateAllocator(
12293  const VmaAllocatorCreateInfo* pCreateInfo,
12294  VmaAllocator* pAllocator)
12295 {
12296  VMA_ASSERT(pCreateInfo && pAllocator);
12297  VMA_DEBUG_LOG("vmaCreateAllocator");
12298  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
12299  return (*pAllocator)->Init(pCreateInfo);
12300 }
12301 
12302 void vmaDestroyAllocator(
12303  VmaAllocator allocator)
12304 {
12305  if(allocator != VK_NULL_HANDLE)
12306  {
12307  VMA_DEBUG_LOG("vmaDestroyAllocator");
12308  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
12309  vma_delete(&allocationCallbacks, allocator);
12310  }
12311 }
12312 
12314  VmaAllocator allocator,
12315  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
12316 {
12317  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
12318  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
12319 }
12320 
12322  VmaAllocator allocator,
12323  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
12324 {
12325  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
12326  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
12327 }
12328 
12330  VmaAllocator allocator,
12331  uint32_t memoryTypeIndex,
12332  VkMemoryPropertyFlags* pFlags)
12333 {
12334  VMA_ASSERT(allocator && pFlags);
12335  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
12336  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
12337 }
12338 
12340  VmaAllocator allocator,
12341  uint32_t frameIndex)
12342 {
12343  VMA_ASSERT(allocator);
12344  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
12345 
12346  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12347 
12348  allocator->SetCurrentFrameIndex(frameIndex);
12349 }
12350 
12351 void vmaCalculateStats(
12352  VmaAllocator allocator,
12353  VmaStats* pStats)
12354 {
12355  VMA_ASSERT(allocator && pStats);
12356  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12357  allocator->CalculateStats(pStats);
12358 }
12359 
12360 #if VMA_STATS_STRING_ENABLED
12361 
12362 void vmaBuildStatsString(
12363  VmaAllocator allocator,
12364  char** ppStatsString,
12365  VkBool32 detailedMap)
12366 {
12367  VMA_ASSERT(allocator && ppStatsString);
12368  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12369 
12370  VmaStringBuilder sb(allocator);
12371  {
12372  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
12373  json.BeginObject();
12374 
12375  VmaStats stats;
12376  allocator->CalculateStats(&stats);
12377 
12378  json.WriteString("Total");
12379  VmaPrintStatInfo(json, stats.total);
12380 
12381  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
12382  {
12383  json.BeginString("Heap ");
12384  json.ContinueString(heapIndex);
12385  json.EndString();
12386  json.BeginObject();
12387 
12388  json.WriteString("Size");
12389  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
12390 
12391  json.WriteString("Flags");
12392  json.BeginArray(true);
12393  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
12394  {
12395  json.WriteString("DEVICE_LOCAL");
12396  }
12397  json.EndArray();
12398 
12399  if(stats.memoryHeap[heapIndex].blockCount > 0)
12400  {
12401  json.WriteString("Stats");
12402  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
12403  }
12404 
12405  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
12406  {
12407  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
12408  {
12409  json.BeginString("Type ");
12410  json.ContinueString(typeIndex);
12411  json.EndString();
12412 
12413  json.BeginObject();
12414 
12415  json.WriteString("Flags");
12416  json.BeginArray(true);
12417  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
12418  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
12419  {
12420  json.WriteString("DEVICE_LOCAL");
12421  }
12422  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12423  {
12424  json.WriteString("HOST_VISIBLE");
12425  }
12426  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
12427  {
12428  json.WriteString("HOST_COHERENT");
12429  }
12430  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
12431  {
12432  json.WriteString("HOST_CACHED");
12433  }
12434  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
12435  {
12436  json.WriteString("LAZILY_ALLOCATED");
12437  }
12438  json.EndArray();
12439 
12440  if(stats.memoryType[typeIndex].blockCount > 0)
12441  {
12442  json.WriteString("Stats");
12443  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
12444  }
12445 
12446  json.EndObject();
12447  }
12448  }
12449 
12450  json.EndObject();
12451  }
12452  if(detailedMap == VK_TRUE)
12453  {
12454  allocator->PrintDetailedMap(json);
12455  }
12456 
12457  json.EndObject();
12458  }
12459 
12460  const size_t len = sb.GetLength();
12461  char* const pChars = vma_new_array(allocator, char, len + 1);
12462  if(len > 0)
12463  {
12464  memcpy(pChars, sb.GetData(), len);
12465  }
12466  pChars[len] = '\0';
12467  *ppStatsString = pChars;
12468 }
12469 
12470 void vmaFreeStatsString(
12471  VmaAllocator allocator,
12472  char* pStatsString)
12473 {
12474  if(pStatsString != VMA_NULL)
12475  {
12476  VMA_ASSERT(allocator);
12477  size_t len = strlen(pStatsString);
12478  vma_delete_array(allocator, pStatsString, len + 1);
12479  }
12480 }
12481 
12482 #endif // #if VMA_STATS_STRING_ENABLED
12483 
12484 /*
12485 This function is not protected by any mutex because it just reads immutable data.
12486 */
12487 VkResult vmaFindMemoryTypeIndex(
12488  VmaAllocator allocator,
12489  uint32_t memoryTypeBits,
12490  const VmaAllocationCreateInfo* pAllocationCreateInfo,
12491  uint32_t* pMemoryTypeIndex)
12492 {
12493  VMA_ASSERT(allocator != VK_NULL_HANDLE);
12494  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
12495  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
12496 
12497  if(pAllocationCreateInfo->memoryTypeBits != 0)
12498  {
12499  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
12500  }
12501 
12502  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
12503  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
12504 
12505  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12506  if(mapped)
12507  {
12508  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
12509  }
12510 
12511  // Convert usage to requiredFlags and preferredFlags.
12512  switch(pAllocationCreateInfo->usage)
12513  {
12515  break;
12517  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12518  {
12519  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
12520  }
12521  break;
12523  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12524  break;
12526  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
12527  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12528  {
12529  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
12530  }
12531  break;
12533  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
12534  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
12535  break;
12536  default:
12537  break;
12538  }
12539 
12540  *pMemoryTypeIndex = UINT32_MAX;
12541  uint32_t minCost = UINT32_MAX;
12542  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
12543  memTypeIndex < allocator->GetMemoryTypeCount();
12544  ++memTypeIndex, memTypeBit <<= 1)
12545  {
12546  // This memory type is acceptable according to memoryTypeBits bitmask.
12547  if((memTypeBit & memoryTypeBits) != 0)
12548  {
12549  const VkMemoryPropertyFlags currFlags =
12550  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
12551  // This memory type contains requiredFlags.
12552  if((requiredFlags & ~currFlags) == 0)
12553  {
12554  // Calculate cost as number of bits from preferredFlags not present in this memory type.
12555  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
12556  // Remember memory type with lowest cost.
12557  if(currCost < minCost)
12558  {
12559  *pMemoryTypeIndex = memTypeIndex;
12560  if(currCost == 0)
12561  {
12562  return VK_SUCCESS;
12563  }
12564  minCost = currCost;
12565  }
12566  }
12567  }
12568  }
12569  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
12570 }
12571 
12573  VmaAllocator allocator,
12574  const VkBufferCreateInfo* pBufferCreateInfo,
12575  const VmaAllocationCreateInfo* pAllocationCreateInfo,
12576  uint32_t* pMemoryTypeIndex)
12577 {
12578  VMA_ASSERT(allocator != VK_NULL_HANDLE);
12579  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
12580  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
12581  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
12582 
12583  const VkDevice hDev = allocator->m_hDevice;
12584  VkBuffer hBuffer = VK_NULL_HANDLE;
12585  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
12586  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
12587  if(res == VK_SUCCESS)
12588  {
12589  VkMemoryRequirements memReq = {};
12590  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
12591  hDev, hBuffer, &memReq);
12592 
12593  res = vmaFindMemoryTypeIndex(
12594  allocator,
12595  memReq.memoryTypeBits,
12596  pAllocationCreateInfo,
12597  pMemoryTypeIndex);
12598 
12599  allocator->GetVulkanFunctions().vkDestroyBuffer(
12600  hDev, hBuffer, allocator->GetAllocationCallbacks());
12601  }
12602  return res;
12603 }
12604 
12606  VmaAllocator allocator,
12607  const VkImageCreateInfo* pImageCreateInfo,
12608  const VmaAllocationCreateInfo* pAllocationCreateInfo,
12609  uint32_t* pMemoryTypeIndex)
12610 {
12611  VMA_ASSERT(allocator != VK_NULL_HANDLE);
12612  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
12613  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
12614  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
12615 
12616  const VkDevice hDev = allocator->m_hDevice;
12617  VkImage hImage = VK_NULL_HANDLE;
12618  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
12619  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
12620  if(res == VK_SUCCESS)
12621  {
12622  VkMemoryRequirements memReq = {};
12623  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
12624  hDev, hImage, &memReq);
12625 
12626  res = vmaFindMemoryTypeIndex(
12627  allocator,
12628  memReq.memoryTypeBits,
12629  pAllocationCreateInfo,
12630  pMemoryTypeIndex);
12631 
12632  allocator->GetVulkanFunctions().vkDestroyImage(
12633  hDev, hImage, allocator->GetAllocationCallbacks());
12634  }
12635  return res;
12636 }
12637 
12638 VkResult vmaCreatePool(
12639  VmaAllocator allocator,
12640  const VmaPoolCreateInfo* pCreateInfo,
12641  VmaPool* pPool)
12642 {
12643  VMA_ASSERT(allocator && pCreateInfo && pPool);
12644 
12645  VMA_DEBUG_LOG("vmaCreatePool");
12646 
12647  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12648 
12649  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
12650 
12651 #if VMA_RECORDING_ENABLED
12652  if(allocator->GetRecorder() != VMA_NULL)
12653  {
12654  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
12655  }
12656 #endif
12657 
12658  return res;
12659 }
12660 
12661 void vmaDestroyPool(
12662  VmaAllocator allocator,
12663  VmaPool pool)
12664 {
12665  VMA_ASSERT(allocator);
12666 
12667  if(pool == VK_NULL_HANDLE)
12668  {
12669  return;
12670  }
12671 
12672  VMA_DEBUG_LOG("vmaDestroyPool");
12673 
12674  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12675 
12676 #if VMA_RECORDING_ENABLED
12677  if(allocator->GetRecorder() != VMA_NULL)
12678  {
12679  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
12680  }
12681 #endif
12682 
12683  allocator->DestroyPool(pool);
12684 }
12685 
12686 void vmaGetPoolStats(
12687  VmaAllocator allocator,
12688  VmaPool pool,
12689  VmaPoolStats* pPoolStats)
12690 {
12691  VMA_ASSERT(allocator && pool && pPoolStats);
12692 
12693  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12694 
12695  allocator->GetPoolStats(pool, pPoolStats);
12696 }
12697 
12699  VmaAllocator allocator,
12700  VmaPool pool,
12701  size_t* pLostAllocationCount)
12702 {
12703  VMA_ASSERT(allocator && pool);
12704 
12705  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12706 
12707 #if VMA_RECORDING_ENABLED
12708  if(allocator->GetRecorder() != VMA_NULL)
12709  {
12710  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
12711  }
12712 #endif
12713 
12714  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
12715 }
12716 
12717 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
12718 {
12719  VMA_ASSERT(allocator && pool);
12720 
12721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12722 
12723  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
12724 
12725  return allocator->CheckPoolCorruption(pool);
12726 }
12727 
12728 VkResult vmaAllocateMemory(
12729  VmaAllocator allocator,
12730  const VkMemoryRequirements* pVkMemoryRequirements,
12731  const VmaAllocationCreateInfo* pCreateInfo,
12732  VmaAllocation* pAllocation,
12733  VmaAllocationInfo* pAllocationInfo)
12734 {
12735  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
12736 
12737  VMA_DEBUG_LOG("vmaAllocateMemory");
12738 
12739  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12740 
12741  VkResult result = allocator->AllocateMemory(
12742  *pVkMemoryRequirements,
12743  false, // requiresDedicatedAllocation
12744  false, // prefersDedicatedAllocation
12745  VK_NULL_HANDLE, // dedicatedBuffer
12746  VK_NULL_HANDLE, // dedicatedImage
12747  *pCreateInfo,
12748  VMA_SUBALLOCATION_TYPE_UNKNOWN,
12749  pAllocation);
12750 
12751 #if VMA_RECORDING_ENABLED
12752  if(allocator->GetRecorder() != VMA_NULL)
12753  {
12754  allocator->GetRecorder()->RecordAllocateMemory(
12755  allocator->GetCurrentFrameIndex(),
12756  *pVkMemoryRequirements,
12757  *pCreateInfo,
12758  *pAllocation);
12759  }
12760 #endif
12761 
12762  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
12763  {
12764  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
12765  }
12766 
12767  return result;
12768 }
12769 
12771  VmaAllocator allocator,
12772  VkBuffer buffer,
12773  const VmaAllocationCreateInfo* pCreateInfo,
12774  VmaAllocation* pAllocation,
12775  VmaAllocationInfo* pAllocationInfo)
12776 {
12777  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
12778 
12779  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
12780 
12781  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12782 
12783  VkMemoryRequirements vkMemReq = {};
12784  bool requiresDedicatedAllocation = false;
12785  bool prefersDedicatedAllocation = false;
12786  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
12787  requiresDedicatedAllocation,
12788  prefersDedicatedAllocation);
12789 
12790  VkResult result = allocator->AllocateMemory(
12791  vkMemReq,
12792  requiresDedicatedAllocation,
12793  prefersDedicatedAllocation,
12794  buffer, // dedicatedBuffer
12795  VK_NULL_HANDLE, // dedicatedImage
12796  *pCreateInfo,
12797  VMA_SUBALLOCATION_TYPE_BUFFER,
12798  pAllocation);
12799 
12800 #if VMA_RECORDING_ENABLED
12801  if(allocator->GetRecorder() != VMA_NULL)
12802  {
12803  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
12804  allocator->GetCurrentFrameIndex(),
12805  vkMemReq,
12806  requiresDedicatedAllocation,
12807  prefersDedicatedAllocation,
12808  *pCreateInfo,
12809  *pAllocation);
12810  }
12811 #endif
12812 
12813  if(pAllocationInfo && result == VK_SUCCESS)
12814  {
12815  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
12816  }
12817 
12818  return result;
12819 }
12820 
12821 VkResult vmaAllocateMemoryForImage(
12822  VmaAllocator allocator,
12823  VkImage image,
12824  const VmaAllocationCreateInfo* pCreateInfo,
12825  VmaAllocation* pAllocation,
12826  VmaAllocationInfo* pAllocationInfo)
12827 {
12828  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
12829 
12830  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
12831 
12832  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12833 
12834  VkMemoryRequirements vkMemReq = {};
12835  bool requiresDedicatedAllocation = false;
12836  bool prefersDedicatedAllocation = false;
12837  allocator->GetImageMemoryRequirements(image, vkMemReq,
12838  requiresDedicatedAllocation, prefersDedicatedAllocation);
12839 
12840  VkResult result = allocator->AllocateMemory(
12841  vkMemReq,
12842  requiresDedicatedAllocation,
12843  prefersDedicatedAllocation,
12844  VK_NULL_HANDLE, // dedicatedBuffer
12845  image, // dedicatedImage
12846  *pCreateInfo,
12847  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
12848  pAllocation);
12849 
12850 #if VMA_RECORDING_ENABLED
12851  if(allocator->GetRecorder() != VMA_NULL)
12852  {
12853  allocator->GetRecorder()->RecordAllocateMemoryForImage(
12854  allocator->GetCurrentFrameIndex(),
12855  vkMemReq,
12856  requiresDedicatedAllocation,
12857  prefersDedicatedAllocation,
12858  *pCreateInfo,
12859  *pAllocation);
12860  }
12861 #endif
12862 
12863  if(pAllocationInfo && result == VK_SUCCESS)
12864  {
12865  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
12866  }
12867 
12868  return result;
12869 }
12870 
12871 void vmaFreeMemory(
12872  VmaAllocator allocator,
12873  VmaAllocation allocation)
12874 {
12875  VMA_ASSERT(allocator);
12876 
12877  if(allocation == VK_NULL_HANDLE)
12878  {
12879  return;
12880  }
12881 
12882  VMA_DEBUG_LOG("vmaFreeMemory");
12883 
12884  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12885 
12886 #if VMA_RECORDING_ENABLED
12887  if(allocator->GetRecorder() != VMA_NULL)
12888  {
12889  allocator->GetRecorder()->RecordFreeMemory(
12890  allocator->GetCurrentFrameIndex(),
12891  allocation);
12892  }
12893 #endif
12894 
12895  allocator->FreeMemory(allocation);
12896 }
12897 
12899  VmaAllocator allocator,
12900  VmaAllocation allocation,
12901  VmaAllocationInfo* pAllocationInfo)
12902 {
12903  VMA_ASSERT(allocator && allocation && pAllocationInfo);
12904 
12905  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12906 
12907 #if VMA_RECORDING_ENABLED
12908  if(allocator->GetRecorder() != VMA_NULL)
12909  {
12910  allocator->GetRecorder()->RecordGetAllocationInfo(
12911  allocator->GetCurrentFrameIndex(),
12912  allocation);
12913  }
12914 #endif
12915 
12916  allocator->GetAllocationInfo(allocation, pAllocationInfo);
12917 }
12918 
12919 VkBool32 vmaTouchAllocation(
12920  VmaAllocator allocator,
12921  VmaAllocation allocation)
12922 {
12923  VMA_ASSERT(allocator && allocation);
12924 
12925  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12926 
12927 #if VMA_RECORDING_ENABLED
12928  if(allocator->GetRecorder() != VMA_NULL)
12929  {
12930  allocator->GetRecorder()->RecordTouchAllocation(
12931  allocator->GetCurrentFrameIndex(),
12932  allocation);
12933  }
12934 #endif
12935 
12936  return allocator->TouchAllocation(allocation);
12937 }
12938 
12940  VmaAllocator allocator,
12941  VmaAllocation allocation,
12942  void* pUserData)
12943 {
12944  VMA_ASSERT(allocator && allocation);
12945 
12946  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12947 
12948  allocation->SetUserData(allocator, pUserData);
12949 
12950 #if VMA_RECORDING_ENABLED
12951  if(allocator->GetRecorder() != VMA_NULL)
12952  {
12953  allocator->GetRecorder()->RecordSetAllocationUserData(
12954  allocator->GetCurrentFrameIndex(),
12955  allocation,
12956  pUserData);
12957  }
12958 #endif
12959 }
12960 
12962  VmaAllocator allocator,
12963  VmaAllocation* pAllocation)
12964 {
12965  VMA_ASSERT(allocator && pAllocation);
12966 
12967  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
12968 
12969  allocator->CreateLostAllocation(pAllocation);
12970 
12971 #if VMA_RECORDING_ENABLED
12972  if(allocator->GetRecorder() != VMA_NULL)
12973  {
12974  allocator->GetRecorder()->RecordCreateLostAllocation(
12975  allocator->GetCurrentFrameIndex(),
12976  *pAllocation);
12977  }
12978 #endif
12979 }
12980 
12981 VkResult vmaMapMemory(
12982  VmaAllocator allocator,
12983  VmaAllocation allocation,
12984  void** ppData)
12985 {
12986  VMA_ASSERT(allocator && allocation && ppData);
12987 
12988  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12989 
12990  VkResult res = allocator->Map(allocation, ppData);
12991 
12992 #if VMA_RECORDING_ENABLED
12993  if(allocator->GetRecorder() != VMA_NULL)
12994  {
12995  allocator->GetRecorder()->RecordMapMemory(
12996  allocator->GetCurrentFrameIndex(),
12997  allocation);
12998  }
12999 #endif
13000 
13001  return res;
13002 }
13003 
13004 void vmaUnmapMemory(
13005  VmaAllocator allocator,
13006  VmaAllocation allocation)
13007 {
13008  VMA_ASSERT(allocator && allocation);
13009 
13010  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13011 
13012 #if VMA_RECORDING_ENABLED
13013  if(allocator->GetRecorder() != VMA_NULL)
13014  {
13015  allocator->GetRecorder()->RecordUnmapMemory(
13016  allocator->GetCurrentFrameIndex(),
13017  allocation);
13018  }
13019 #endif
13020 
13021  allocator->Unmap(allocation);
13022 }
13023 
13024 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13025 {
13026  VMA_ASSERT(allocator && allocation);
13027 
13028  VMA_DEBUG_LOG("vmaFlushAllocation");
13029 
13030  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13031 
13032  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
13033 
13034 #if VMA_RECORDING_ENABLED
13035  if(allocator->GetRecorder() != VMA_NULL)
13036  {
13037  allocator->GetRecorder()->RecordFlushAllocation(
13038  allocator->GetCurrentFrameIndex(),
13039  allocation, offset, size);
13040  }
13041 #endif
13042 }
13043 
13044 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13045 {
13046  VMA_ASSERT(allocator && allocation);
13047 
13048  VMA_DEBUG_LOG("vmaInvalidateAllocation");
13049 
13050  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13051 
13052  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
13053 
13054 #if VMA_RECORDING_ENABLED
13055  if(allocator->GetRecorder() != VMA_NULL)
13056  {
13057  allocator->GetRecorder()->RecordInvalidateAllocation(
13058  allocator->GetCurrentFrameIndex(),
13059  allocation, offset, size);
13060  }
13061 #endif
13062 }
13063 
13064 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
13065 {
13066  VMA_ASSERT(allocator);
13067 
13068  VMA_DEBUG_LOG("vmaCheckCorruption");
13069 
13070  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13071 
13072  return allocator->CheckCorruption(memoryTypeBits);
13073 }
13074 
13075 VkResult vmaDefragment(
13076  VmaAllocator allocator,
13077  VmaAllocation* pAllocations,
13078  size_t allocationCount,
13079  VkBool32* pAllocationsChanged,
13080  const VmaDefragmentationInfo *pDefragmentationInfo,
13081  VmaDefragmentationStats* pDefragmentationStats)
13082 {
13083  VMA_ASSERT(allocator && pAllocations);
13084 
13085  VMA_DEBUG_LOG("vmaDefragment");
13086 
13087  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13088 
13089  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
13090 }
13091 
13092 VkResult vmaBindBufferMemory(
13093  VmaAllocator allocator,
13094  VmaAllocation allocation,
13095  VkBuffer buffer)
13096 {
13097  VMA_ASSERT(allocator && allocation && buffer);
13098 
13099  VMA_DEBUG_LOG("vmaBindBufferMemory");
13100 
13101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13102 
13103  return allocator->BindBufferMemory(allocation, buffer);
13104 }
13105 
13106 VkResult vmaBindImageMemory(
13107  VmaAllocator allocator,
13108  VmaAllocation allocation,
13109  VkImage image)
13110 {
13111  VMA_ASSERT(allocator && allocation && image);
13112 
13113  VMA_DEBUG_LOG("vmaBindImageMemory");
13114 
13115  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13116 
13117  return allocator->BindImageMemory(allocation, image);
13118 }
13119 
13120 VkResult vmaCreateBuffer(
13121  VmaAllocator allocator,
13122  const VkBufferCreateInfo* pBufferCreateInfo,
13123  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13124  VkBuffer* pBuffer,
13125  VmaAllocation* pAllocation,
13126  VmaAllocationInfo* pAllocationInfo)
13127 {
13128  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
13129 
13130  VMA_DEBUG_LOG("vmaCreateBuffer");
13131 
13132  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13133 
13134  *pBuffer = VK_NULL_HANDLE;
13135  *pAllocation = VK_NULL_HANDLE;
13136 
13137  // 1. Create VkBuffer.
13138  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
13139  allocator->m_hDevice,
13140  pBufferCreateInfo,
13141  allocator->GetAllocationCallbacks(),
13142  pBuffer);
13143  if(res >= 0)
13144  {
13145  // 2. vkGetBufferMemoryRequirements.
13146  VkMemoryRequirements vkMemReq = {};
13147  bool requiresDedicatedAllocation = false;
13148  bool prefersDedicatedAllocation = false;
13149  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
13150  requiresDedicatedAllocation, prefersDedicatedAllocation);
13151 
13152  // Make sure alignment requirements for specific buffer usages reported
13153  // in Physical Device Properties are included in alignment reported by memory requirements.
13154  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
13155  {
13156  VMA_ASSERT(vkMemReq.alignment %
13157  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
13158  }
13159  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
13160  {
13161  VMA_ASSERT(vkMemReq.alignment %
13162  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
13163  }
13164  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
13165  {
13166  VMA_ASSERT(vkMemReq.alignment %
13167  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
13168  }
13169 
13170  // 3. Allocate memory using allocator.
13171  res = allocator->AllocateMemory(
13172  vkMemReq,
13173  requiresDedicatedAllocation,
13174  prefersDedicatedAllocation,
13175  *pBuffer, // dedicatedBuffer
13176  VK_NULL_HANDLE, // dedicatedImage
13177  *pAllocationCreateInfo,
13178  VMA_SUBALLOCATION_TYPE_BUFFER,
13179  pAllocation);
13180 
13181 #if VMA_RECORDING_ENABLED
13182  if(allocator->GetRecorder() != VMA_NULL)
13183  {
13184  allocator->GetRecorder()->RecordCreateBuffer(
13185  allocator->GetCurrentFrameIndex(),
13186  *pBufferCreateInfo,
13187  *pAllocationCreateInfo,
13188  *pAllocation);
13189  }
13190 #endif
13191 
13192  if(res >= 0)
13193  {
13194  // 3. Bind buffer with memory.
13195  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
13196  if(res >= 0)
13197  {
13198  // All steps succeeded.
13199  #if VMA_STATS_STRING_ENABLED
13200  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
13201  #endif
13202  if(pAllocationInfo != VMA_NULL)
13203  {
13204  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13205  }
13206 
13207  return VK_SUCCESS;
13208  }
13209  allocator->FreeMemory(*pAllocation);
13210  *pAllocation = VK_NULL_HANDLE;
13211  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
13212  *pBuffer = VK_NULL_HANDLE;
13213  return res;
13214  }
13215  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
13216  *pBuffer = VK_NULL_HANDLE;
13217  return res;
13218  }
13219  return res;
13220 }
13221 
13222 void vmaDestroyBuffer(
13223  VmaAllocator allocator,
13224  VkBuffer buffer,
13225  VmaAllocation allocation)
13226 {
13227  VMA_ASSERT(allocator);
13228 
13229  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
13230  {
13231  return;
13232  }
13233 
13234  VMA_DEBUG_LOG("vmaDestroyBuffer");
13235 
13236  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13237 
13238 #if VMA_RECORDING_ENABLED
13239  if(allocator->GetRecorder() != VMA_NULL)
13240  {
13241  allocator->GetRecorder()->RecordDestroyBuffer(
13242  allocator->GetCurrentFrameIndex(),
13243  allocation);
13244  }
13245 #endif
13246 
13247  if(buffer != VK_NULL_HANDLE)
13248  {
13249  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
13250  }
13251 
13252  if(allocation != VK_NULL_HANDLE)
13253  {
13254  allocator->FreeMemory(allocation);
13255  }
13256 }
13257 
13258 VkResult vmaCreateImage(
13259  VmaAllocator allocator,
13260  const VkImageCreateInfo* pImageCreateInfo,
13261  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13262  VkImage* pImage,
13263  VmaAllocation* pAllocation,
13264  VmaAllocationInfo* pAllocationInfo)
13265 {
13266  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
13267 
13268  VMA_DEBUG_LOG("vmaCreateImage");
13269 
13270  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13271 
13272  *pImage = VK_NULL_HANDLE;
13273  *pAllocation = VK_NULL_HANDLE;
13274 
13275  // 1. Create VkImage.
13276  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
13277  allocator->m_hDevice,
13278  pImageCreateInfo,
13279  allocator->GetAllocationCallbacks(),
13280  pImage);
13281  if(res >= 0)
13282  {
13283  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
13284  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
13285  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
13286 
13287  // 2. Allocate memory using allocator.
13288  VkMemoryRequirements vkMemReq = {};
13289  bool requiresDedicatedAllocation = false;
13290  bool prefersDedicatedAllocation = false;
13291  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
13292  requiresDedicatedAllocation, prefersDedicatedAllocation);
13293 
13294  res = allocator->AllocateMemory(
13295  vkMemReq,
13296  requiresDedicatedAllocation,
13297  prefersDedicatedAllocation,
13298  VK_NULL_HANDLE, // dedicatedBuffer
13299  *pImage, // dedicatedImage
13300  *pAllocationCreateInfo,
13301  suballocType,
13302  pAllocation);
13303 
13304 #if VMA_RECORDING_ENABLED
13305  if(allocator->GetRecorder() != VMA_NULL)
13306  {
13307  allocator->GetRecorder()->RecordCreateImage(
13308  allocator->GetCurrentFrameIndex(),
13309  *pImageCreateInfo,
13310  *pAllocationCreateInfo,
13311  *pAllocation);
13312  }
13313 #endif
13314 
13315  if(res >= 0)
13316  {
13317  // 3. Bind image with memory.
13318  res = allocator->BindImageMemory(*pAllocation, *pImage);
13319  if(res >= 0)
13320  {
13321  // All steps succeeded.
13322  #if VMA_STATS_STRING_ENABLED
13323  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
13324  #endif
13325  if(pAllocationInfo != VMA_NULL)
13326  {
13327  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13328  }
13329 
13330  return VK_SUCCESS;
13331  }
13332  allocator->FreeMemory(*pAllocation);
13333  *pAllocation = VK_NULL_HANDLE;
13334  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
13335  *pImage = VK_NULL_HANDLE;
13336  return res;
13337  }
13338  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
13339  *pImage = VK_NULL_HANDLE;
13340  return res;
13341  }
13342  return res;
13343 }
13344 
13345 void vmaDestroyImage(
13346  VmaAllocator allocator,
13347  VkImage image,
13348  VmaAllocation allocation)
13349 {
13350  VMA_ASSERT(allocator);
13351 
13352  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
13353  {
13354  return;
13355  }
13356 
13357  VMA_DEBUG_LOG("vmaDestroyImage");
13358 
13359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13360 
13361 #if VMA_RECORDING_ENABLED
13362  if(allocator->GetRecorder() != VMA_NULL)
13363  {
13364  allocator->GetRecorder()->RecordDestroyImage(
13365  allocator->GetCurrentFrameIndex(),
13366  allocation);
13367  }
13368 #endif
13369 
13370  if(image != VK_NULL_HANDLE)
13371  {
13372  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
13373  }
13374  if(allocation != VK_NULL_HANDLE)
13375  {
13376  allocator->FreeMemory(allocation);
13377  }
13378 }
13379 
13380 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1429
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1742
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1498
diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h index e92873b..697b733 100644 --- a/src/vk_mem_alloc.h +++ b/src/vk_mem_alloc.h @@ -29,7 +29,7 @@ extern "C" { /** \mainpage Vulkan Memory Allocator -Version 2.1.0-alpha.4 (2018-08-22) +Version 2.1.0-beta.1 (2018-08-24) Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved. \n License: MIT